diff --git a/.github/workflows/sbom-report.yml b/.github/workflows/sbom-report.yml
index 7cc54ed4d50..0208892f413 100644
--- a/.github/workflows/sbom-report.yml
+++ b/.github/workflows/sbom-report.yml
@@ -14,7 +14,7 @@ jobs:
uses: actions/checkout@v4
- name: Anchore SBOM Action
- uses: anchore/sbom-action@v0.15.9
+ uses: anchore/sbom-action@v0.15.10
with:
artifact-name: ${{ github.event.repository.name }}-spdx.json
diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml
index 9a1e347dff0..da675ef7f08 100644
--- a/.github/workflows/test-build-deploy.yml
+++ b/.github/workflows/test-build-deploy.yml
@@ -29,12 +29,12 @@ jobs:
build_image: ${{ steps.build_image_step.outputs.build_image }}
# Determine if we will deploy (aka push) the image to the registry.
is_deploy: ${{ (startsWith(github.ref, 'refs/tags/') || startsWith(github.ref, 'refs/heads/r')) && github.event_name == 'push' && github.repository == 'grafana/mimir' }}
-
+
goversion:
runs-on: ubuntu-latest
needs: prepare
- container:
+ container:
image: ${{ needs.prepare.outputs.build_image }}
steps:
- uses: actions/checkout@v4
@@ -68,7 +68,7 @@ jobs:
run: |
echo "path=$(golangci-lint cache status | grep 'Dir: ' | cut -d ' ' -f2)" >> "$GITHUB_OUTPUT"
- name: Cache golangci-lint cache
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
key: lint-golangci-lint-${{ runner.os }}-${{ hashFiles('go.mod', 'go.sum', '.golangci.yml', 'Makefile') }}
path: ${{ steps.golangcilintcache.outputs.path }}
@@ -78,14 +78,14 @@ jobs:
echo "gocache=$(go env GOCACHE)" >> "$GITHUB_OUTPUT"
echo "gomodcache=$(go env GOMODCACHE)" >> "$GITHUB_OUTPUT"
- name: Cache Go build cache
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
key: lint-go-build-${{ runner.os }}-${{ hashFiles('go.mod', 'go.sum') }}
path: ${{ steps.goenv.outputs.gocache }}
# Although we use vendoring, this linting job downloads all modules to verify that what is vendored is correct,
# so it'll use GOMODCACHE. Other jobs don't need this.
- name: Cache Go module cache
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
key: lint-go-mod-${{ runner.os }}-${{ hashFiles('go.mod', 'go.sum') }}
path: ${{ steps.goenv.outputs.gomodcache }}
@@ -189,7 +189,7 @@ jobs:
mkdir -p /go/src/github.com/grafana/mimir
ln -s $GITHUB_WORKSPACE/* /go/src/github.com/grafana/mimir
- name: Set up Helm
- uses: azure/setup-helm@v3
+ uses: azure/setup-helm@v4
with:
version: v3.8.2
- name: Check Helm Tests
@@ -222,7 +222,7 @@ jobs:
run: |
echo "path=$(go env GOCACHE)" >> "$GITHUB_OUTPUT"
- name: Cache Go build cache
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
# Cache is shared between test groups.
key: test-go-build-${{ runner.os }}-${{ hashFiles('go.mod', 'go.sum') }}
@@ -262,7 +262,7 @@ jobs:
run: |
echo "path=$(go env GOCACHE)" >> "$GITHUB_OUTPUT"
- name: Cache Go build cache
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
key: build-go-build-${{ runner.os }}-${{ hashFiles('go.mod', 'go.sum') }}
path: ${{ steps.gocache.outputs.path }}
@@ -331,7 +331,7 @@ jobs:
run: |
echo "path=$(go env GOCACHE)" >> "$GITHUB_OUTPUT"
- name: Cache Go build cache
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
# Cache is shared between test groups.
key: integration-go-build-${{ runner.os }}-${{ hashFiles('go.mod', 'go.sum') }}
@@ -404,7 +404,7 @@ jobs:
run: |
echo "path=$(go env GOCACHE)" >> "$GITHUB_OUTPUT"
- name: Cache Go build cache
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
# Cache is shared between test groups.
key: integration-go-build-${{ runner.os }}-${{ hashFiles('go.mod', 'go.sum') }}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c379f3de23a..68e7a43a841 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,10 +4,19 @@
### Grafana Mimir
-* [CHANGE] Querier: the CLI flag `-querier.minimize-ingester-requests` has been moved from "experimental" to "advanced". #7638
+* [CHANGE] Ingester: the `/ingester/flush` endpoint is now only allowed to execute while the ingester is in the `Running` state. A 503 status code is returned if the endpoint is called while the ingester is not in the `Running` state. #7486
+* [CHANGE] Distributor: Include the label name in the `err-mimir-label-value-too-long` error message. #7740
+* [FEATURE] Continuous-test: now runnable as a module with `mimir -target=continuous-test`. #7747
* [FEATURE] Store-gateway: Allow specific tenants to be enabled or disabled via `-store-gateway.enabled-tenants` or `-store-gateway.disabled-tenants` CLI flags or their corresponding YAML settings. #7653
+* [FEATURE] New `-<prefix>.s3.bucket-lookup-type` flag configures the bucket lookup style used to access buckets in S3-compatible providers. #7684
+* [FEATURE] Server: added experimental [PROXY protocol support](https://www.haproxy.org/download/2.3/doc/proxy-protocol.txt). It can be enabled via `-server.proxy-protocol-enabled=true` and, when enabled, applies to both the HTTP and gRPC listening ports (a configuration sketch for the new options follows this list). #7698
* [ENHANCEMENT] Store-gateway: merge series from different blocks concurrently. #7456
* [ENHANCEMENT] Store-gateway: Add `stage="wait_max_concurrent"` to `cortex_bucket_store_series_request_stage_duration_seconds` which records how long the query had to wait for its turn for `-blocks-storage.bucket-store.max-concurrent`. #7609
+* [ENHANCEMENT] Querier: add `cortex_querier_federation_upstream_query_wait_duration_seconds` to observe time from when a querier picks up a cross-tenant query to when work begins on its single-tenant counterparts. #7209
+* [ENHANCEMENT] Compactor: Add `cortex_compactor_block_compaction_delay_seconds` metric to track how long it takes to compact blocks. #7635
+* [ENHANCEMENT] Store-gateway: add `outcome` label to `cortex_bucket_stores_gate_duration_seconds` histogram metric. Possible values for the `outcome` label are: `rejected_canceled`, `rejected_deadline_exceeded`, `rejected_other`, and `permitted`. #7784
+* [ENHANCEMENT] Query-frontend: use zero-allocation experimental decoder for active series queries via `-query-frontend.use-active-series-decoder`. #7665
+* [ENHANCEMENT] Go: updated to 1.22.2. #7802
* [BUGFIX] Rules: improve error handling when querier is local to the ruler. #7567
* [BUGFIX] Querier, store-gateway: Protect against panics raised during snappy encoding. #7520
* [BUGFIX] Ingester: Prevent timely compaction of empty blocks. #7624
@@ -16,17 +25,39 @@
* [BUGFIX] Query-frontend: Don't panic when using the `-query-frontend.downstream-url` flag. #7651
* [BUGFIX] Ingester: when receiving multiple exemplars for a native histogram via remote write, sort them and only report an error if all are older than the latest exemplar as this could be a partial update. #7640
* [BUGFIX] Ingester: don't retain blocks if they finish exactly on the boundary of the retention window. #7656
+* [BUGFIX] Bug-fixes and improvements to experimental native histograms. #7744
+* [BUGFIX] Querier: return an error when a query uses `label_join` with an invalid destination label name. #7744
+* [BUGFIX] Compactor: correct outstanding job estimation in metrics and `compaction-planner` tool when block labels differ. #7745
+* [BUGFIX] Ingester: turn native histogram validation errors in TSDB into soft ingester errors that result in returning 4xx to the end-user instead of 5xx. In the case of TSDB validation errors, the counter `cortex_discarded_samples_total` will be increased with the `reason` label set to `"invalid-native-histogram"`. #7736 #7773
+* [BUGFIX] Do not wrap error message with `sampled 1/` if it's not actually sampled. #7784
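The following is a minimal, illustrative YAML sketch of the new options called out above (PROXY protocol support and the S3 bucket lookup style); the field names follow the generated config descriptor later in this diff, while the values shown are assumptions rather than recommendations:

```yaml
server:
  # Experimental: accept PROXY protocol headers on the HTTP and gRPC listeners.
  proxy_protocol_enabled: true

blocks_storage:
  s3:
    # One of: auto (default), path, virtual-hosted.
    bucket_lookup_type: path
```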
### Mixin
+* [CHANGE] Alerts: Removed the obsolete `MimirQueriesIncorrect` alert that relied on test-exporter metrics; test-exporter support was removed in the Mimir 2.0 release. #7774
+* [FEATURE] Dashboards: added 'Remote ruler reads networking' dashboard. #7751
* [ENHANCEMENT] Alerts: allow configuring alerts range interval via `_config.base_alerts_range_interval_minutes`. #7591
-* [ENHANCEMENT] Dashboards: Add panels for monitoring distributor and ingester when using ingest-storage. These panels are disabled by default, but can be enabled using `show_ingest_storage_panels: true` config option. Similarly existing panels used when distributors and ingesters use gRPC for forwarding requests can be disabled by setting `show_grpc_ingestion_panels: false`. #7670
+* [ENHANCEMENT] Dashboards: Add panels for monitoring distributor and ingester when using ingest-storage. These panels are disabled by default, but can be enabled using the `show_ingest_storage_panels: true` config option. Similarly, existing panels used when distributors and ingesters use gRPC for forwarding requests can be disabled by setting `show_grpc_ingestion_panels: false`. #7670 #7699
+* [ENHANCEMENT] Alerts: add the following alerts when using ingest-storage: #7699 #7702
+ * `MimirIngesterLastConsumedOffsetCommitFailed`
+ * `MimirIngesterFailedToReadRecordsFromKafka`
+ * `MimirIngesterKafkaFetchErrorsRateTooHigh`
+ * `MimirStartingIngesterKafkaReceiveDelayIncreasing`
+ * `MimirRunningIngesterReceiveDelayTooHigh`
+ * `MimirIngesterFailsToProcessRecordsFromKafka`
+ * `MimirIngesterFailsEnforceStrongConsistencyOnReadPath`
+* [ENHANCEMENT] Dashboards: add in-flight queries scaling metric panel for ruler-querier. #7749
+* [ENHANCEMENT] Dashboards: renamed rows in the "Remote ruler reads" and "Remote ruler reads resources" dashboards to match the actual component names. #7750
* [ENHANCEMENT] Dashboards: allow switching between using classic of native histograms in dashboards. #7627
* Overview dashboard, Status panel, `cortex_request_duration_seconds` metric.
-* [BUGFIX] Dashobards: Fix regular expression for matching read-path gRPC ingester methods to include querying of exemplars, label-related queries, or active series queries. #7676
+* [BUGFIX] Dashboards: Fix regular expression for matching read-path gRPC ingester methods to include querying of exemplars, label-related queries, or active series queries. #7676
+* [BUGFIX] Dashboards: Fix user id abbreviations and column heads for Top Tenants dashboard. #7724
### Jsonnet
+* [CHANGE] Memcached: Change default read timeout for chunks and index caches to `750ms` from `450ms`. #7778
+* [ENHANCEMENT] Compactor: add `$._config.cortex_compactor_concurrent_rollout_enabled` option (disabled by default) that makes use of rollout-operator to speed up the rollout of compactors. #7783
+* [BUGFIX] Guard against missing samples in KEDA queries. #7691
+
### Mimirtool
* [BUGFIX] Fix panic in `loadgen` subcommand. #7629
@@ -41,18 +72,13 @@
### Documentation
+* [ENHANCEMENT] Clarify Compactor and its storage volume when configured under Kubernetes. #7675
+
### Tools
* [ENHANCEMENT] ulidtime: add option to show random part of ULID, timestamp in milliseconds and header. #7615
-## 2.12.0-rc.1
-
-### Grafana Mimir
-
-* [CHANGE] Querier: the CLI flag `-querier.minimize-ingester-requests` has been moved from "experimental" to "advanced". #7638
-* [BUGFIX] Query-frontend: Fix memory leak on every request. #7654
-
-## 2.12.0-rc.0
+## 2.12.0
### Grafana Mimir
@@ -101,6 +127,7 @@
* [CHANGE] The configuration option `-querier.max-query-into-future` has been deprecated and will be removed in Mimir 2.14. #7496
* [CHANGE] Distributor: the metric `cortex_distributor_sample_delay_seconds` has been deprecated and will be removed in Mimir 2.14. #7516
* [CHANGE] Query-frontend: The deprecated YAML setting `frontend.cache_unaligned_requests` has been moved to `limits.cache_unaligned_requests`. #7519
+* [CHANGE] Querier: the CLI flag `-querier.minimize-ingester-requests` has been moved from "experimental" to "advanced". #7638
* [FEATURE] Introduce `-server.log-source-ips-full` option to log all IPs from `Forwarded`, `X-Real-IP`, `X-Forwarded-For` headers. #7250
* [FEATURE] Introduce `-tenant-federation.max-tenants` option to limit the max number of tenants allowed for requests when federation is enabled. #6959
* [FEATURE] Cardinality API: added a new `count_method` parameter which enables counting active label names. #7085
@@ -174,6 +201,7 @@
* [BUGFIX] Fix metadata API using wrong JSON field names. #7475
* [BUGFIX] Ruler: fix native histogram recording rule result corruption. #7552
* [BUGFIX] Querier: fix HTTP status code translations for remote read requests. Previously, remote-read had conflicting behaviours: when returning samples all internal errors were translated to HTTP 400; when returning chunks all internal errors were translated to HTTP 500. #7487
+* [BUGFIX] Query-frontend: Fix memory leak on every request. #7654
### Mixin
diff --git a/Makefile b/Makefile
index 04e143fae13..edd6a136c64 100644
--- a/Makefile
+++ b/Makefile
@@ -242,7 +242,7 @@ mimir-build-image/$(UPTODATE): mimir-build-image/*
# All the boiler plate for building golang follows:
SUDO := $(shell docker info >/dev/null 2>&1 || echo "sudo -E")
BUILD_IN_CONTAINER ?= true
-LATEST_BUILD_IMAGE_TAG ?= pr7557-ae15c572b6
+LATEST_BUILD_IMAGE_TAG ?= pr7802-42cedde636
# TTY is parameterized to allow Google Cloud Builder to run builds,
# as it currently disallows TTY devices. This value needs to be overridden
diff --git a/VERSION b/VERSION
index 46b81d815a2..d8b698973a4 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.11.0
+2.12.0
diff --git a/cmd/mimir-continuous-test/main.go b/cmd/mimir-continuous-test/main.go
index fb1642c4df6..4cf5a7d26c5 100644
--- a/cmd/mimir-continuous-test/main.go
+++ b/cmd/mimir-continuous-test/main.go
@@ -22,33 +22,23 @@ import (
"github.com/grafana/mimir/pkg/util/version"
)
-type Config struct {
- ServerMetricsPort int
- LogLevel log.Level
- Client continuoustest.ClientConfig
- Manager continuoustest.ManagerConfig
- WriteReadSeriesTest continuoustest.WriteReadSeriesTestConfig
-}
-
-func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
- f.IntVar(&cfg.ServerMetricsPort, "server.metrics-port", 9900, "The port where metrics are exposed.")
- cfg.LogLevel.RegisterFlags(f)
- cfg.Client.RegisterFlags(f)
- cfg.Manager.RegisterFlags(f)
- cfg.WriteReadSeriesTest.RegisterFlags(f)
-}
-
func main() {
// Parse CLI arguments.
- cfg := &Config{}
+ cfg := &continuoustest.Config{}
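+ // The metrics port and log level remain flags of this standalone binary only;
+ // the rest of the configuration now lives in continuoustest.Config so the same
+ // flags can also be registered when running as a Mimir module (-target=continuous-test).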
+ var (
+ serverMetricsPort int
+ logLevel log.Level
+ )
+ flag.CommandLine.IntVar(&serverMetricsPort, "server.metrics-port", 9900, "The port where metrics are exposed.")
cfg.RegisterFlags(flag.CommandLine)
+ logLevel.RegisterFlags(flag.CommandLine)
if err := flagext.ParseFlagsWithoutArguments(flag.CommandLine); err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
- util_log.InitLogger(log.LogfmtFormat, cfg.LogLevel, false, util_log.RateLimitedLoggerCfg{})
+ util_log.InitLogger(log.LogfmtFormat, logLevel, false, util_log.RateLimitedLoggerCfg{})
// Setting the environment variable JAEGER_AGENT_HOST enables tracing.
if trace, err := tracing.NewFromEnv("mimir-continuous-test", jaegercfg.MaxTagValueLength(16e3)); err != nil {
@@ -64,7 +54,7 @@ func main() {
registry.MustRegister(version.NewCollector("mimir_continuous_test"))
registry.MustRegister(collectors.NewGoCollector())
- i := instrumentation.NewMetricsServer(cfg.ServerMetricsPort, registry)
+ i := instrumentation.NewMetricsServer(serverMetricsPort, registry)
if err := i.Start(); err != nil {
level.Error(logger).Log("msg", "Unable to start instrumentation server", "err", err.Error())
util_log.Flush()
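With this refactor the test configuration lives in the `continuoustest` package, so the same suite can also run inside the main `mimir` binary (per the CHANGELOG entry above). A minimal sketch, assuming all other options are left at their defaults and that the top-level `target` YAML field mirrors the `-target` CLI flag:

```yaml
# Run the continuous-test suite as a module of the main mimir binary
# instead of the standalone mimir-continuous-test command.
target: continuous-test
```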
diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json
index 00e6c3aff90..e9cd27e6f40 100644
--- a/cmd/mimir/config-descriptor.json
+++ b/cmd/mimir/config-descriptor.json
@@ -212,6 +212,17 @@
"fieldType": "int",
"fieldCategory": "advanced"
},
+ {
+ "kind": "field",
+ "name": "proxy_protocol_enabled",
+ "required": false,
+ "desc": "Enables PROXY protocol.",
+ "fieldValue": null,
+ "fieldDefaultValue": false,
+ "fieldFlag": "server.proxy-protocol-enabled",
+ "fieldType": "boolean",
+ "fieldCategory": "experimental"
+ },
{
"kind": "field",
"name": "tls_cipher_suites",
@@ -1060,7 +1071,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "distributor.ha-tracker.etcd.tls-cipher-suites",
@@ -1413,7 +1424,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "distributor.ring.etcd.tls-cipher-suites",
@@ -1777,7 +1788,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "querier.store-gateway-client.tls-cipher-suites",
@@ -2145,7 +2156,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "ingester.client.tls-cipher-suites",
@@ -2513,7 +2524,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "ingester.ring.etcd.tls-cipher-suites",
@@ -4295,7 +4306,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "querier.frontend-client.tls-cipher-suites",
@@ -4557,7 +4568,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "querier.scheduler-client.tls-cipher-suites",
@@ -4948,7 +4959,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "query-frontend.grpc-client-config.tls-cipher-suites",
@@ -5290,7 +5301,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "query-frontend.results-cache.memcached.tls-cipher-suites",
@@ -5582,7 +5593,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "query-frontend.results-cache.redis.tls-cipher-suites",
@@ -5682,6 +5693,17 @@
"fieldType": "boolean",
"fieldCategory": "experimental"
},
+ {
+ "kind": "field",
+ "name": "use_active_series_decoder",
+ "required": false,
+ "desc": "Set to true to use the zero-allocation response decoder for active series queries.",
+ "fieldValue": null,
+ "fieldDefaultValue": false,
+ "fieldFlag": "query-frontend.use-active-series-decoder",
+ "fieldType": "boolean",
+ "fieldCategory": "experimental"
+ },
{
"kind": "field",
"name": "query_result_response_format",
@@ -5812,6 +5834,17 @@
"fieldType": "string",
"fieldCategory": "advanced"
},
+ {
+ "kind": "field",
+ "name": "bucket_lookup_type",
+ "required": false,
+ "desc": "Bucket lookup style type, used to access bucket in S3-compatible service. Default is auto. Supported values are: auto, path, virtual-hosted.",
+ "fieldValue": null,
+ "fieldDefaultValue": "auto",
+ "fieldFlag": "blocks-storage.s3.bucket-lookup-type",
+ "fieldType": "string",
+ "fieldCategory": "advanced"
+ },
{
"kind": "field",
"name": "storage_class",
@@ -6635,7 +6668,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "blocks-storage.bucket-store.index-cache.memcached.tls-cipher-suites",
@@ -6927,7 +6960,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "blocks-storage.bucket-store.index-cache.redis.tls-cipher-suites",
@@ -7194,7 +7227,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "blocks-storage.bucket-store.chunks-cache.memcached.tls-cipher-suites",
@@ -7486,7 +7519,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "blocks-storage.bucket-store.chunks-cache.redis.tls-cipher-suites",
@@ -7777,7 +7810,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "blocks-storage.bucket-store.metadata-cache.memcached.tls-cipher-suites",
@@ -8069,7 +8102,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "blocks-storage.bucket-store.metadata-cache.redis.tls-cipher-suites",
@@ -9230,7 +9263,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "compactor.ring.etcd.tls-cipher-suites",
@@ -9691,7 +9724,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "store-gateway.sharding-ring.etcd.tls-cipher-suites",
@@ -10523,7 +10556,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "ruler.client.tls-cipher-suites",
@@ -10729,7 +10762,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "ruler.alertmanager-client.tls-cipher-suites",
@@ -11031,7 +11064,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "ruler.ring.etcd.tls-cipher-suites",
@@ -11488,7 +11521,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "ruler.query-frontend.grpc-client-config.tls-cipher-suites",
@@ -11686,6 +11719,17 @@
"fieldType": "string",
"fieldCategory": "advanced"
},
+ {
+ "kind": "field",
+ "name": "bucket_lookup_type",
+ "required": false,
+ "desc": "Bucket lookup style type, used to access bucket in S3-compatible service. Default is auto. Supported values are: auto, path, virtual-hosted.",
+ "fieldValue": null,
+ "fieldDefaultValue": "auto",
+ "fieldFlag": "ruler-storage.s3.bucket-lookup-type",
+ "fieldType": "string",
+ "fieldCategory": "advanced"
+ },
{
"kind": "field",
"name": "storage_class",
@@ -12458,7 +12502,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "ruler-storage.cache.memcached.tls-cipher-suites",
@@ -12750,7 +12794,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "ruler-storage.cache.redis.tls-cipher-suites",
@@ -13078,7 +13122,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "alertmanager.sharding-ring.etcd.tls-cipher-suites",
@@ -13563,7 +13607,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "alertmanager.alertmanager-client.tls-cipher-suites",
@@ -13760,6 +13804,17 @@
"fieldType": "string",
"fieldCategory": "advanced"
},
+ {
+ "kind": "field",
+ "name": "bucket_lookup_type",
+ "required": false,
+ "desc": "Bucket lookup style type, used to access bucket in S3-compatible service. Default is auto. Supported values are: auto, path, virtual-hosted.",
+ "fieldValue": null,
+ "fieldDefaultValue": "auto",
+ "fieldFlag": "alertmanager-storage.s3.bucket-lookup-type",
+ "fieldType": "string",
+ "fieldCategory": "advanced"
+ },
{
"kind": "field",
"name": "storage_class",
@@ -14713,7 +14768,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "memberlist.tls-cipher-suites",
@@ -14980,7 +15035,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "query-scheduler.grpc-client-config.tls-cipher-suites",
@@ -15273,7 +15328,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "query-scheduler.ring.etcd.tls-cipher-suites",
@@ -15740,7 +15795,7 @@
"kind": "field",
"name": "tls_cipher_suites",
"required": false,
- "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
+ "desc": "Override the default cipher suite list (separated by commas). Allowed values:\n\nSecure Ciphers:\n- TLS_AES_128_GCM_SHA256\n- TLS_AES_256_GCM_SHA384\n- TLS_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\n- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\n\nInsecure Ciphers:\n- TLS_RSA_WITH_RC4_128_SHA\n- TLS_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA\n- TLS_RSA_WITH_AES_256_CBC_SHA\n- TLS_RSA_WITH_AES_128_CBC_SHA256\n- TLS_RSA_WITH_AES_128_GCM_SHA256\n- TLS_RSA_WITH_AES_256_GCM_SHA384\n- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_RC4_128_SHA\n- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\n- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\n",
"fieldValue": null,
"fieldDefaultValue": "",
"fieldFlag": "overrides-exporter.ring.etcd.tls-cipher-suites",
@@ -16067,6 +16122,17 @@
"fieldType": "string",
"fieldCategory": "advanced"
},
+ {
+ "kind": "field",
+ "name": "bucket_lookup_type",
+ "required": false,
+ "desc": "Bucket lookup style type, used to access bucket in S3-compatible service. Default is auto. Supported values are: auto, path, virtual-hosted.",
+ "fieldValue": null,
+ "fieldDefaultValue": "auto",
+ "fieldFlag": "common.storage.s3.bucket-lookup-type",
+ "fieldType": "string",
+ "fieldCategory": "advanced"
+ },
{
"kind": "field",
"name": "storage_class",
diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl
index 269572cc154..6346a92db06 100644
--- a/cmd/mimir/help-all.txt.tmpl
+++ b/cmd/mimir/help-all.txt.tmpl
@@ -29,6 +29,8 @@ Usage of ./cmd/mimir/mimir:
Path at which alertmanager configurations are stored.
-alertmanager-storage.s3.access-key-id string
S3 access key ID
+ -alertmanager-storage.s3.bucket-lookup-type value
+ Bucket lookup style type, used to access bucket in S3-compatible service. Default is auto. Supported values are: auto, path, virtual-hosted.
-alertmanager-storage.s3.bucket-name string
S3 bucket name
-alertmanager-storage.s3.endpoint string
@@ -667,6 +669,8 @@ Usage of ./cmd/mimir/mimir:
JSON either from a Google Developers Console client_credentials.json file, or a Google Developers service account key. Needs to be valid JSON, not a filesystem path.
-blocks-storage.s3.access-key-id string
S3 access key ID
+ -blocks-storage.s3.bucket-lookup-type value
+ Bucket lookup style type, used to access bucket in S3-compatible service. Default is auto. Supported values are: auto, path, virtual-hosted.
-blocks-storage.s3.bucket-name string
S3 bucket name
-blocks-storage.s3.endpoint string
@@ -835,6 +839,8 @@ Usage of ./cmd/mimir/mimir:
JSON either from a Google Developers Console client_credentials.json file, or a Google Developers service account key. Needs to be valid JSON, not a filesystem path.
-common.storage.s3.access-key-id string
S3 access key ID
+ -common.storage.s3.bucket-lookup-type value
+ Bucket lookup style type, used to access bucket in S3-compatible service. Default is auto. Supported values are: auto, path, virtual-hosted.
-common.storage.s3.bucket-name string
S3 bucket name
-common.storage.s3.endpoint string
@@ -2011,6 +2017,8 @@ Usage of ./cmd/mimir/mimir:
[experimental] Split instant queries by an interval and execute in parallel. 0 to disable it.
-query-frontend.split-queries-by-interval duration
Split range queries by an interval and execute in parallel. You should use a multiple of 24 hours to optimize querying blocks. 0 to disable it. (default 24h0m0s)
+ -query-frontend.use-active-series-decoder
+ [experimental] Set to true to use the zero-allocation response decoder for active series queries.
-query-scheduler.additional-query-queue-dimensions-enabled
[experimental] Enqueue query requests with additional queue dimensions to split tenant request queues into subqueues. This enables separate requests to proceed from a tenant's subqueues even when other subqueues are blocked on slow query requests. Must be set on both query-frontend and scheduler to take effect. (default false)
-query-scheduler.grpc-client-config.backoff-max-period duration
@@ -2251,6 +2259,8 @@ Usage of ./cmd/mimir/mimir:
Directory to scan for rules
-ruler-storage.s3.access-key-id string
S3 access key ID
+ -ruler-storage.s3.bucket-lookup-type value
+ Bucket lookup style type, used to access bucket in S3-compatible service. Default is auto. Supported values are: auto, path, virtual-hosted.
-ruler-storage.s3.bucket-name string
S3 bucket name
-ruler-storage.s3.endpoint string
@@ -2647,6 +2657,8 @@ Usage of ./cmd/mimir/mimir:
Regex for matching the source IPs. Only used if server.log-source-ips-enabled is true. If not set the default Forwarded, X-Real-IP and X-Forwarded-For headers are used
-server.path-prefix string
Base path to serve all API routes from (e.g. /v1/)
+ -server.proxy-protocol-enabled
+ [experimental] Enables PROXY protocol.
-server.register-instrumentation
        Register the instrumentation handlers (/metrics etc). (default true)
-server.report-grpc-codes-in-instrumentation-label-enabled
@@ -2757,6 +2769,38 @@ Usage of ./cmd/mimir/mimir:
[experimental] The number of workers used for each tenant federated query. This setting limits the maximum number of per-tenant queries executed at a time for a tenant federated query. (default 16)
-tenant-federation.max-tenants int
[experimental] The max number of tenant IDs that may be supplied for a federated query if enabled. 0 to disable the limit.
+ -tests.basic-auth-password string
+        The password to use for HTTP basic authentication. (mutually exclusive with tenant-id or bearer-token flags)
+ -tests.basic-auth-user string
+        The username to use for HTTP basic authentication. (mutually exclusive with tenant-id or bearer-token flags)
+ -tests.bearer-token string
+ The bearer token to use for HTTP bearer authentication. (mutually exclusive with tenant-id flag or basic-auth flags)
+ -tests.read-endpoint string
+ The base endpoint on the read path. The URL should have no trailing slash. The specific API path is appended by the tool to the URL, for example /api/v1/query_range for range query API, so the configured URL must not include it.
+ -tests.read-timeout duration
+ The timeout for a single read request. (default 1m0s)
+ -tests.run-interval duration
+ How frequently tests should run. (default 5m0s)
+ -tests.smoke-test
+ Run a smoke test, i.e. run all tests once and exit.
+ -tests.tenant-id string
+ The tenant ID to use to write and read metrics in tests. (mutually exclusive with basic-auth or bearer-token flags) (default "anonymous")
+ -tests.write-batch-size int
+ The maximum number of series to write in a single request. (default 1000)
+ -tests.write-endpoint string
+ The base endpoint on the write path. The URL should have no trailing slash. The specific API path is appended by the tool to the URL, for example /api/v1/push for the remote write API endpoint, so the configured URL must not include it.
+ -tests.write-protocol string
+ The protocol to use to write series data. Supported values are: prometheus, otlp-http (default "prometheus")
+ -tests.write-read-series-test.float-samples-enabled
+ Set to true to use float samples (default true)
+ -tests.write-read-series-test.histogram-samples-enabled
+ Set to true to use native histogram samples
+ -tests.write-read-series-test.max-query-age duration
+        How far back in the past metrics can be queried at most. (default 168h0m0s)
+ -tests.write-read-series-test.num-series int
+ Number of series used for the test. (default 10000)
+ -tests.write-timeout duration
+ The timeout for a single write request. (default 5s)
-timeseries-unmarshal-caching-optimization-enabled
[experimental] Enables optimized marshaling of timeseries. (default true)
-usage-stats.enabled
diff --git a/cmd/mimir/help.txt.tmpl b/cmd/mimir/help.txt.tmpl
index 93c78089569..7dd666432df 100644
--- a/cmd/mimir/help.txt.tmpl
+++ b/cmd/mimir/help.txt.tmpl
@@ -719,6 +719,38 @@ Usage of ./cmd/mimir/mimir:
Comma-separated list of components to include in the instantiated process. The default value 'all' includes all components that are required to form a functional Grafana Mimir instance in single-binary mode. Use the '-modules' command line flag to get a list of available components, and to see which components are included with 'all'. (default all)
-tenant-federation.enabled
If enabled on all services, queries can be federated across multiple tenants. The tenant IDs involved need to be specified separated by a '|' character in the 'X-Scope-OrgID' header.
+ -tests.basic-auth-password string
+        The password to use for HTTP basic authentication. (mutually exclusive with tenant-id or bearer-token flags)
+ -tests.basic-auth-user string
+        The username to use for HTTP basic authentication. (mutually exclusive with tenant-id or bearer-token flags)
+ -tests.bearer-token string
+ The bearer token to use for HTTP bearer authentication. (mutually exclusive with tenant-id flag or basic-auth flags)
+ -tests.read-endpoint string
+ The base endpoint on the read path. The URL should have no trailing slash. The specific API path is appended by the tool to the URL, for example /api/v1/query_range for range query API, so the configured URL must not include it.
+ -tests.read-timeout duration
+ The timeout for a single read request. (default 1m0s)
+ -tests.run-interval duration
+ How frequently tests should run. (default 5m0s)
+ -tests.smoke-test
+ Run a smoke test, i.e. run all tests once and exit.
+ -tests.tenant-id string
+ The tenant ID to use to write and read metrics in tests. (mutually exclusive with basic-auth or bearer-token flags) (default "anonymous")
+ -tests.write-batch-size int
+ The maximum number of series to write in a single request. (default 1000)
+ -tests.write-endpoint string
+ The base endpoint on the write path. The URL should have no trailing slash. The specific API path is appended by the tool to the URL, for example /api/v1/push for the remote write API endpoint, so the configured URL must not include it.
+ -tests.write-protocol string
+ The protocol to use to write series data. Supported values are: prometheus, otlp-http (default "prometheus")
+ -tests.write-read-series-test.float-samples-enabled
+ Set to true to use float samples (default true)
+ -tests.write-read-series-test.histogram-samples-enabled
+ Set to true to use native histogram samples
+ -tests.write-read-series-test.max-query-age duration
+        How far back in the past metrics can be queried at most. (default 168h0m0s)
+ -tests.write-read-series-test.num-series int
+ Number of series used for the test. (default 10000)
+ -tests.write-timeout duration
+ The timeout for a single write request. (default 5s)
-usage-stats.enabled
Enable anonymous usage reporting. (default true)
-usage-stats.installation-mode string
diff --git a/cmd/mimir/main_test.go b/cmd/mimir/main_test.go
index bfe4c6fffdd..c3fcf8b818a 100644
--- a/cmd/mimir/main_test.go
+++ b/cmd/mimir/main_test.go
@@ -20,7 +20,7 @@ import (
"gopkg.in/yaml.v3"
"github.com/grafana/mimir/pkg/mimir"
- "github.com/grafana/mimir/pkg/util/fieldcategory"
+ "github.com/grafana/mimir/pkg/util/configdoc"
"github.com/grafana/mimir/pkg/util/test"
)
@@ -443,7 +443,7 @@ func TestParseConfigFileParameter(t *testing.T) {
func TestFieldCategoryOverridesNotStale(t *testing.T) {
overrides := make(map[string]struct{})
- fieldcategory.VisitOverrides(func(s string) {
+ configdoc.VisitCategoryOverrides(func(s string) {
overrides[s] = struct{}{}
})
diff --git a/cmd/mimirtool/main.go b/cmd/mimirtool/main.go
index 416dcef20c5..4beff2a5f58 100644
--- a/cmd/mimirtool/main.go
+++ b/cmd/mimirtool/main.go
@@ -49,7 +49,7 @@ func main() {
remoteReadCommand.Register(app, envVars)
ruleCommand.Register(app, envVars, prometheus.DefaultRegisterer)
- app.Command("version", "Get the version of the mimirtool CLI").Action(func(k *kingpin.ParseContext) error {
+ app.Command("version", "Get the version of the mimirtool CLI").Action(func(*kingpin.ParseContext) error {
fmt.Fprintln(os.Stdout, mimirversion.Print("Mimirtool"))
version.CheckLatest(mimirversion.Version)
return nil
diff --git a/development/mimir-ingest-storage/config/grafana-agent.flow b/development/mimir-ingest-storage/config/grafana-agent.flow
new file mode 100644
index 00000000000..bdc64537308
--- /dev/null
+++ b/development/mimir-ingest-storage/config/grafana-agent.flow
@@ -0,0 +1,107 @@
+prometheus.scrape "metrics_local_mimir_read_write_mode_mimir_write" {
+ targets = concat(
+ [{
+ __address__ = "mimir-write-zone-a-1:8080",
+ cluster = "docker-compose",
+ container = "mimir-write",
+ namespace = "mimir-read-write-mode",
+ }],
+ [{
+ __address__ = "mimir-write-zone-b-1:8080",
+ cluster = "docker-compose",
+ container = "mimir-write",
+ namespace = "mimir-read-write-mode",
+ }],
+ [{
+ __address__ = "mimir-write-zone-a-2:8080",
+ cluster = "docker-compose",
+ container = "mimir-write",
+ namespace = "mimir-read-write-mode",
+ }],
+ [{
+ __address__ = "mimir-write-zone-b-2:8080",
+ cluster = "docker-compose",
+ container = "mimir-write",
+ namespace = "mimir-read-write-mode",
+ }],
+ [{
+ __address__ = "mimir-write-zone-a-3:8080",
+ cluster = "docker-compose",
+ container = "mimir-write",
+ namespace = "mimir-read-write-mode",
+ }],
+ [{
+ __address__ = "mimir-write-zone-b-3:8080",
+ cluster = "docker-compose",
+ container = "mimir-write",
+ namespace = "mimir-read-write-mode",
+ }],
+ )
+ forward_to = [prometheus.remote_write.metrics_local.receiver]
+ job_name = "mimir-read-write-mode/mimir-write"
+ scrape_interval = "5s"
+ scrape_timeout = "5s"
+
+ enable_protobuf_negotiation = true
+ scrape_classic_histograms = true
+}
+
+prometheus.scrape "metrics_local_mimir_read_write_mode_mimir_read" {
+ targets = concat(
+ [{
+ __address__ = "mimir-read-1:8080",
+ cluster = "docker-compose",
+ container = "mimir-read",
+ namespace = "mimir-read-write-mode",
+ }],
+ [{
+ __address__ = "mimir-read-2:8080",
+ cluster = "docker-compose",
+ container = "mimir-read",
+ namespace = "mimir-read-write-mode",
+ }],
+ )
+ forward_to = [prometheus.remote_write.metrics_local.receiver]
+ job_name = "mimir-read-write-mode/mimir-read"
+ scrape_interval = "5s"
+ scrape_timeout = "5s"
+
+ enable_protobuf_negotiation = true
+ scrape_classic_histograms = true
+}
+
+prometheus.scrape "metrics_local_mimir_read_write_mode_mimir_backend" {
+ targets = concat(
+ [{
+ __address__ = "mimir-backend-1:8080",
+ cluster = "docker-compose",
+ container = "mimir-backend",
+ namespace = "mimir-read-write-mode",
+ }],
+ [{
+ __address__ = "mimir-backend-2:8080",
+ cluster = "docker-compose",
+ container = "mimir-backend",
+ namespace = "mimir-read-write-mode",
+ }],
+ )
+ forward_to = [prometheus.remote_write.metrics_local.receiver]
+ job_name = "mimir-read-write-mode/mimir-backend"
+ scrape_interval = "5s"
+ scrape_timeout = "5s"
+
+ enable_protobuf_negotiation = true
+ scrape_classic_histograms = true
+}
+
+prometheus.remote_write "metrics_local" {
+ endpoint {
+ name = "local"
+ url = "http://mimir-write-zone-a-1:8080/api/v1/push"
+ send_native_histograms = true
+
+ queue_config { }
+
+ metadata_config { }
+ }
+}
diff --git a/development/mimir-ingest-storage/config/grafana-agent.yaml b/development/mimir-ingest-storage/config/grafana-agent.yaml
deleted file mode 100644
index 409c7362067..00000000000
--- a/development/mimir-ingest-storage/config/grafana-agent.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-server:
- log_level: info
-
-prometheus:
- global:
- scrape_interval: 5s
- configs:
- - name: local
- host_filter: false
- scrape_configs:
- - job_name: mimir-read-write-mode/mimir-write
- static_configs:
- - targets:
- - 'mimir-write-zone-a-1:8080'
- - 'mimir-write-zone-b-1:8080'
- - 'mimir-write-zone-a-2:8080'
- - 'mimir-write-zone-b-2:8080'
- - 'mimir-write-zone-a-3:8080'
- - 'mimir-write-zone-b-3:8080'
- labels:
- cluster: 'docker-compose'
- namespace: 'mimir-read-write-mode'
- container: 'mimir-write'
- - job_name: mimir-read-write-mode/mimir-read
- static_configs:
- - targets: ['mimir-read-1:8080', 'mimir-read-2:8080']
- labels:
- cluster: 'docker-compose'
- namespace: 'mimir-read-write-mode'
- container: 'mimir-read'
- - job_name: mimir-read-write-mode/mimir-backend
- static_configs:
- - targets: ['mimir-backend-1:8080', 'mimir-backend-2:8080']
- labels:
- cluster: 'docker-compose'
- namespace: 'mimir-read-write-mode'
- container: 'mimir-backend'
-
- remote_write:
- - url: http://mimir-write-zone-a-1:8080/api/v1/push
diff --git a/development/mimir-ingest-storage/config/mimir.yaml b/development/mimir-ingest-storage/config/mimir.yaml
index dc2cfc47e40..3404298ff60 100644
--- a/development/mimir-ingest-storage/config/mimir.yaml
+++ b/development/mimir-ingest-storage/config/mimir.yaml
@@ -16,7 +16,8 @@ ingest_storage:
topic: mimir-ingest
ingester:
- return_only_grpc_errors: true
+ return_only_grpc_errors: true
+ track_ingester_owned_series: true
partition_ring:
min_partition_owners_count: 2
diff --git a/development/mimir-ingest-storage/docker-compose.jsonnet b/development/mimir-ingest-storage/docker-compose.jsonnet
index 0ea0cb34a0b..7a24ffe6d88 100644
--- a/development/mimir-ingest-storage/docker-compose.jsonnet
+++ b/development/mimir-ingest-storage/docker-compose.jsonnet
@@ -144,6 +144,12 @@ std.manifestYamlDoc({
'KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,ORBSTACK:PLAINTEXT',
'KAFKA_INTER_BROKER_LISTENER_NAME=PLAINTEXT',
'KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1',
+ 'KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS=10000',
+
+      // Uncomment the following config to keep a short retention of records in Kafka.
+ // This is useful to test the behaviour when Kafka records are deleted.
+ // 'KAFKA_LOG_RETENTION_MINUTES=1',
+ // 'KAFKA_LOG_SEGMENT_BYTES=1000000',
],
ports: [
'29092:29092',
@@ -184,10 +190,13 @@ std.manifestYamlDoc({
// Scrape the metrics also with the Grafana agent (useful to test metadata ingestion
// until metadata remote write is not supported by Prometheus).
'grafana-agent': {
- image: 'grafana/agent:v0.37.3',
- command: ['-config.file=/etc/agent-config/grafana-agent.yaml', '-metrics.wal-directory=/tmp', '-server.http.address=127.0.0.1:9091'],
+ image: 'grafana/agent:v0.40.0',
+ command: ['run', '--storage.path=/tmp', '--server.http.listen-addr=127.0.0.1:9091', '/etc/agent-config/grafana-agent.flow'],
volumes: ['./config:/etc/agent-config'],
ports: ['9091:9091'],
+ environment: {
+ AGENT_MODE: 'flow',
+ },
},
},
diff --git a/development/mimir-ingest-storage/docker-compose.yml b/development/mimir-ingest-storage/docker-compose.yml
index ec4022ba4e3..957a732b765 100644
--- a/development/mimir-ingest-storage/docker-compose.yml
+++ b/development/mimir-ingest-storage/docker-compose.yml
@@ -12,10 +12,13 @@
- "../../operations/mimir-mixin-compiled/dashboards:/var/lib/grafana/dashboards"
"grafana-agent":
"command":
- - "-config.file=/etc/agent-config/grafana-agent.yaml"
- - "-metrics.wal-directory=/tmp"
- - "-server.http.address=127.0.0.1:9091"
- "image": "grafana/agent:v0.37.3"
+ - "run"
+ - "--storage.path=/tmp"
+ - "--server.http.listen-addr=127.0.0.1:9091"
+ - "/etc/agent-config/grafana-agent.flow"
+ "environment":
+ "AGENT_MODE": "flow"
+ "image": "grafana/agent:v0.40.0"
"ports":
- "9091:9091"
"volumes":
@@ -31,6 +34,7 @@
- "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,ORBSTACK:PLAINTEXT"
- "KAFKA_INTER_BROKER_LISTENER_NAME=PLAINTEXT"
- "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1"
+ - "KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS=10000"
"healthcheck":
"interval": "1s"
"retries": "30"
diff --git a/development/mimir-monolithic-mode-with-swift-storage/docker-compose.yml b/development/mimir-monolithic-mode-with-swift-storage/docker-compose.yml
index c5dba048ca9..074dbabcd89 100644
--- a/development/mimir-monolithic-mode-with-swift-storage/docker-compose.yml
+++ b/development/mimir-monolithic-mode-with-swift-storage/docker-compose.yml
@@ -43,7 +43,7 @@ services:
# Scrape the metrics also with the Grafana agent (useful to test metadata ingestion
# until metadata remote write is not supported by Prometheus).
grafana-agent:
- image: grafana/agent:v0.40.2
+ image: grafana/agent:v0.40.3
command: ["-config.file=/etc/agent-config/grafana-agent.yaml", "-metrics.wal-directory=/tmp", "-server.http.address=127.0.0.1:9091"]
volumes:
- ./config:/etc/agent-config
diff --git a/development/mimir-monolithic-mode/docker-compose.yml b/development/mimir-monolithic-mode/docker-compose.yml
index 7e6d4db1c37..b2eb1e23f6f 100644
--- a/development/mimir-monolithic-mode/docker-compose.yml
+++ b/development/mimir-monolithic-mode/docker-compose.yml
@@ -44,7 +44,7 @@ services:
grafana-agent:
profiles:
- grafana-agent-static
- image: grafana/agent:v0.40.2
+ image: grafana/agent:v0.40.3
command: ["-config.file=/etc/agent-config/grafana-agent.yaml", "-metrics.wal-directory=/tmp", "-server.http.address=127.0.0.1:9091"]
volumes:
- ./config:/etc/agent-config
@@ -54,7 +54,7 @@ services:
grafana-agent-flow:
profiles:
- grafana-agent-flow
- image: grafana/agent:v0.40.2
+ image: grafana/agent:v0.40.3
environment:
- AGENT_MODE=flow
command: ["run", "--server.http.listen-addr=0.0.0.0:9092", "/etc/agent/config.river"]
diff --git a/docs/sources/helm-charts/mimir-distributed/migration-guides/migrate-from-cortex.md b/docs/sources/helm-charts/mimir-distributed/migration-guides/migrate-from-cortex.md
index 6e086ea6034..6bfc9ed8e9a 100644
--- a/docs/sources/helm-charts/mimir-distributed/migration-guides/migrate-from-cortex.md
+++ b/docs/sources/helm-charts/mimir-distributed/migration-guides/migrate-from-cortex.md
@@ -149,7 +149,7 @@ You can migrate to the Grafana Mimir Helm chart (`grafana/mimir-distributed` v3.
The ingester needs storage capacity for write-ahead-logging (WAL) and to create blocks for uploading.
The WAL was optional in Cortex with chunks, but not optional in Mimir.
- A StatefulSet is the most convenient way to make sure that each Pod gets a storage volume.
+ A StatefulSet is the most convenient way to make sure that each Pod gets a dedicated storage volume.
**To migrate to the Grafana Mimir Helm chart:**
diff --git a/docs/sources/mimir/configure/about-versioning.md b/docs/sources/mimir/configure/about-versioning.md
index dbdfa1eb9c2..d7b77e92648 100644
--- a/docs/sources/mimir/configure/about-versioning.md
+++ b/docs/sources/mimir/configure/about-versioning.md
@@ -170,6 +170,9 @@ The following features are currently experimental:
- `-ingester.use-ingester-owned-series-for-limits`
- `-ingester.track-ingester-owned-series`
- `-ingester.owned-series-update-interval`
+- Server
+ - [PROXY protocol](https://www.haproxy.org/download/2.3/doc/proxy-protocol.txt) support
+ - `-server.proxy-protocol-enabled`
## Deprecated features
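
As an illustration only (not part of this patch), a minimal sketch of what enabling the experimental PROXY protocol support could look like in a Mimir configuration file; it assumes a PROXY-protocol-aware load balancer, such as HAProxy, terminates client connections in front of Mimir:

```yaml
# Hypothetical sketch: accept PROXY protocol headers on incoming connections.
# Only useful when the proxy in front of Mimir emits the PROXY protocol
# header; the feature is experimental and disabled by default.
server:
  proxy_protocol_enabled: true
```
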
diff --git a/docs/sources/mimir/configure/configuration-parameters/index.md b/docs/sources/mimir/configure/configuration-parameters/index.md
index 1009de0f27e..518dbdabb9a 100644
--- a/docs/sources/mimir/configure/configuration-parameters/index.md
+++ b/docs/sources/mimir/configure/configuration-parameters/index.md
@@ -524,6 +524,10 @@ The `server` block configures the HTTP and gRPC server of the launched service(s
# CLI flag: -server.grpc-conn-limit
[grpc_listen_conn_limit: | default = 0]
+# (experimental) Enables PROXY protocol.
+# CLI flag: -server.proxy-protocol-enabled
+[proxy_protocol_enabled: | default = false]
+
# Comma-separated list of cipher suites to use. If blank, the default Go cipher
# suites is used.
# CLI flag: -server.tls-cipher-suites
@@ -1248,10 +1252,6 @@ store_gateway_client:
# Allowed values:
#
# Secure Ciphers:
- # - TLS_RSA_WITH_AES_128_CBC_SHA
- # - TLS_RSA_WITH_AES_256_CBC_SHA
- # - TLS_RSA_WITH_AES_128_GCM_SHA256
- # - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_AES_128_GCM_SHA256
# - TLS_AES_256_GCM_SHA384
# - TLS_CHACHA20_POLY1305_SHA256
@@ -1269,7 +1269,11 @@ store_gateway_client:
# Insecure Ciphers:
# - TLS_RSA_WITH_RC4_128_SHA
# - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+ # - TLS_RSA_WITH_AES_128_CBC_SHA
+ # - TLS_RSA_WITH_AES_256_CBC_SHA
# - TLS_RSA_WITH_AES_128_CBC_SHA256
+ # - TLS_RSA_WITH_AES_128_GCM_SHA256
+ # - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -1504,6 +1508,11 @@ results_cache:
# CLI flag: -query-frontend.shard-active-series-queries
[shard_active_series_queries: | default = false]
+# (experimental) Set to true to use the zero-allocation response decoder for
+# active series queries.
+# CLI flag: -query-frontend.use-active-series-decoder
+[use_active_series_decoder: | default = false]
+
# Format to use when retrieving query results from queriers. Supported values:
# json, protobuf
# CLI flag: -query-frontend.query-result-response-format
@@ -1716,10 +1725,6 @@ alertmanager_client:
# Allowed values:
#
# Secure Ciphers:
- # - TLS_RSA_WITH_AES_128_CBC_SHA
- # - TLS_RSA_WITH_AES_256_CBC_SHA
- # - TLS_RSA_WITH_AES_128_GCM_SHA256
- # - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_AES_128_GCM_SHA256
# - TLS_AES_256_GCM_SHA384
# - TLS_CHACHA20_POLY1305_SHA256
@@ -1737,7 +1742,11 @@ alertmanager_client:
# Insecure Ciphers:
# - TLS_RSA_WITH_RC4_128_SHA
# - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+ # - TLS_RSA_WITH_AES_128_CBC_SHA
+ # - TLS_RSA_WITH_AES_256_CBC_SHA
# - TLS_RSA_WITH_AES_128_CBC_SHA256
+ # - TLS_RSA_WITH_AES_128_GCM_SHA256
+ # - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -2193,10 +2202,6 @@ alertmanager_client:
# Allowed values:
#
# Secure Ciphers:
- # - TLS_RSA_WITH_AES_128_CBC_SHA
- # - TLS_RSA_WITH_AES_256_CBC_SHA
- # - TLS_RSA_WITH_AES_128_GCM_SHA256
- # - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_AES_128_GCM_SHA256
# - TLS_AES_256_GCM_SHA384
# - TLS_CHACHA20_POLY1305_SHA256
@@ -2214,7 +2219,11 @@ alertmanager_client:
# Insecure Ciphers:
# - TLS_RSA_WITH_RC4_128_SHA
# - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+ # - TLS_RSA_WITH_AES_128_CBC_SHA
+ # - TLS_RSA_WITH_AES_256_CBC_SHA
# - TLS_RSA_WITH_AES_128_CBC_SHA256
+ # - TLS_RSA_WITH_AES_128_GCM_SHA256
+ # - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -2464,10 +2473,6 @@ backoff_config:
# Allowed values:
#
# Secure Ciphers:
-# - TLS_RSA_WITH_AES_128_CBC_SHA
-# - TLS_RSA_WITH_AES_256_CBC_SHA
-# - TLS_RSA_WITH_AES_128_GCM_SHA256
-# - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_AES_128_GCM_SHA256
# - TLS_AES_256_GCM_SHA384
# - TLS_CHACHA20_POLY1305_SHA256
@@ -2485,7 +2490,11 @@ backoff_config:
# Insecure Ciphers:
# - TLS_RSA_WITH_RC4_128_SHA
# - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+# - TLS_RSA_WITH_AES_128_CBC_SHA
+# - TLS_RSA_WITH_AES_256_CBC_SHA
# - TLS_RSA_WITH_AES_128_CBC_SHA256
+# - TLS_RSA_WITH_AES_128_GCM_SHA256
+# - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -2621,10 +2630,6 @@ The `etcd` block configures the etcd client. The supported CLI flags ``
# Allowed values:
#
# Secure Ciphers:
-# - TLS_RSA_WITH_AES_128_CBC_SHA
-# - TLS_RSA_WITH_AES_256_CBC_SHA
-# - TLS_RSA_WITH_AES_128_GCM_SHA256
-# - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_AES_128_GCM_SHA256
# - TLS_AES_256_GCM_SHA384
# - TLS_CHACHA20_POLY1305_SHA256
@@ -2642,7 +2647,11 @@ The `etcd` block configures the etcd client. The supported CLI flags ``
# Insecure Ciphers:
# - TLS_RSA_WITH_RC4_128_SHA
# - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+# - TLS_RSA_WITH_AES_128_CBC_SHA
+# - TLS_RSA_WITH_AES_256_CBC_SHA
# - TLS_RSA_WITH_AES_128_CBC_SHA256
+# - TLS_RSA_WITH_AES_128_GCM_SHA256
+# - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -2878,10 +2887,6 @@ The `memberlist` block configures the Gossip memberlist.
# Allowed values:
#
# Secure Ciphers:
-# - TLS_RSA_WITH_AES_128_CBC_SHA
-# - TLS_RSA_WITH_AES_256_CBC_SHA
-# - TLS_RSA_WITH_AES_128_GCM_SHA256
-# - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_AES_128_GCM_SHA256
# - TLS_AES_256_GCM_SHA384
# - TLS_CHACHA20_POLY1305_SHA256
@@ -2899,7 +2904,11 @@ The `memberlist` block configures the Gossip memberlist.
# Insecure Ciphers:
# - TLS_RSA_WITH_RC4_128_SHA
# - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+# - TLS_RSA_WITH_AES_128_CBC_SHA
+# - TLS_RSA_WITH_AES_256_CBC_SHA
# - TLS_RSA_WITH_AES_128_CBC_SHA256
+# - TLS_RSA_WITH_AES_128_GCM_SHA256
+# - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -4374,10 +4383,6 @@ The `memcached` block configures the Memcached-based caching backend. The suppor
# Allowed values:
#
# Secure Ciphers:
-# - TLS_RSA_WITH_AES_128_CBC_SHA
-# - TLS_RSA_WITH_AES_256_CBC_SHA
-# - TLS_RSA_WITH_AES_128_GCM_SHA256
-# - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_AES_128_GCM_SHA256
# - TLS_AES_256_GCM_SHA384
# - TLS_CHACHA20_POLY1305_SHA256
@@ -4395,7 +4400,11 @@ The `memcached` block configures the Memcached-based caching backend. The suppor
# Insecure Ciphers:
# - TLS_RSA_WITH_RC4_128_SHA
# - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+# - TLS_RSA_WITH_AES_128_CBC_SHA
+# - TLS_RSA_WITH_AES_256_CBC_SHA
# - TLS_RSA_WITH_AES_128_CBC_SHA256
+# - TLS_RSA_WITH_AES_128_GCM_SHA256
+# - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -4531,10 +4540,6 @@ The `redis` block configures the Redis-based caching backend. The supported CLI
# Allowed values:
#
# Secure Ciphers:
-# - TLS_RSA_WITH_AES_128_CBC_SHA
-# - TLS_RSA_WITH_AES_256_CBC_SHA
-# - TLS_RSA_WITH_AES_128_GCM_SHA256
-# - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_AES_128_GCM_SHA256
# - TLS_AES_256_GCM_SHA384
# - TLS_CHACHA20_POLY1305_SHA256
@@ -4552,7 +4557,11 @@ The `redis` block configures the Redis-based caching backend. The supported CLI
# Insecure Ciphers:
# - TLS_RSA_WITH_RC4_128_SHA
# - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+# - TLS_RSA_WITH_AES_128_CBC_SHA
+# - TLS_RSA_WITH_AES_256_CBC_SHA
# - TLS_RSA_WITH_AES_128_CBC_SHA256
+# - TLS_RSA_WITH_AES_128_GCM_SHA256
+# - TLS_RSA_WITH_AES_256_GCM_SHA384
# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_RC4_128_SHA
# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -4618,6 +4627,11 @@ The s3_backend block configures the connection to Amazon S3 object storage backe
# CLI flag: -<prefix>.s3.list-objects-version
[list_objects_version: <string> | default = ""]
+# (advanced) Bucket lookup style type, used to access bucket in S3-compatible
+# service. Default is auto. Supported values are: auto, path, virtual-hosted.
+# CLI flag: -<prefix>.s3.bucket-lookup-type
+[bucket_lookup_type: <string> | default = "auto"]
+
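+# For example, to force path-style addressing when pointing Mimir at an
+# S3-compatible service (a sketch; the endpoint and bucket name are
+# illustrative, and the other fields of the s3 block are unchanged):
+#
+#   endpoint: s3-compatible.example.com
+#   bucket_name: mimir-blocks
+#   bucket_lookup_type: path
+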
# (experimental) The S3 storage class to use, not set by default. Details can be
# found at https://aws.amazon.com/s3/storage-classes/. Supported values are:
# STANDARD, REDUCED_REDUNDANCY, GLACIER, STANDARD_IA, ONEZONE_IA,
diff --git a/docs/sources/mimir/get-started/about-grafana-mimir-architecture/index.md b/docs/sources/mimir/get-started/about-grafana-mimir-architecture/index.md
index 7b414736e8e..fb32410962d 100644
--- a/docs/sources/mimir/get-started/about-grafana-mimir-architecture/index.md
+++ b/docs/sources/mimir/get-started/about-grafana-mimir-architecture/index.md
@@ -42,7 +42,7 @@ Each newly created block is uploaded to long-term storage and kept in the ingest
This gives [queriers]({{< relref "../../references/architecture/components/querier" >}}) and [store-gateways]({{< relref "../../references/architecture/components/store-gateway" >}}) enough time to discover the new block on the storage and download its index-header.
To effectively use the WAL, and to be able to recover the in-memory series if an ingester abruptly terminates, store the WAL to a persistent disk that can survive an ingester failure.
-For example, when running in the cloud, include an AWS EBS volume or a GCP persistent disk.
+For example, when running in the cloud, include an AWS EBS volume or a GCP Persistent Disk.
If you are running the Grafana Mimir cluster in Kubernetes, you can use a StatefulSet with a persistent volume claim for the ingesters.
The location on the filesystem where the WAL is stored is the same location where local TSDB blocks (compacted from head) are stored. The locations of the WAL and the local TSDB blocks cannot be decoupled.
@@ -78,7 +78,7 @@ After the querier executes the query, it returns the results to the query-fronte
## The role of Prometheus
Prometheus instances scrape samples from various targets and push them to Grafana Mimir by using Prometheus’ [remote write API](https://prometheus.io/docs/prometheus/latest/storage/#remote-storage-integrations).
-The remote write API emits batched [Snappy](https://google.github.io/snappy/)-compressed [Protocol Buffer](https://developers.google.com/protocol-buffers/) messages inside the body of an HTTP `PUT` request.
+The remote write API emits batched [Snappy](https://google.github.io/snappy/)-compressed [Protocol Buffer](https://protobuf.dev/) messages inside the body of an HTTP `POST` request.
Mimir requires that each HTTP request has a header that specifies a tenant ID for the request. Request [authentication and authorization]({{< relref "../../manage/secure/authentication-and-authorization" >}}) are handled by an external reverse proxy.
diff --git a/docs/sources/mimir/manage/mimir-runbooks/_index.md b/docs/sources/mimir/manage/mimir-runbooks/_index.md
index af46e1d8723..4d480867c53 100644
--- a/docs/sources/mimir/manage/mimir-runbooks/_index.md
+++ b/docs/sources/mimir/manage/mimir-runbooks/_index.md
@@ -190,7 +190,7 @@ How to **investigate**:
- Check the `Mimir / Writes` dashboard
- Looking at the dashboard you should see in which Mimir service the high latency originates
- - The panels in the dashboard are vertically sorted by the network path (eg. gateway -> distributor -> ingester)
+ - The panels in the dashboard are vertically sorted by the network path (eg. gateway -> distributor -> ingester). When using [ingest-storage](#mimir-ingest-storage-experimental), the network path changes to gateway -> distributor -> Kafka instead.
- Deduce where in the stack the latency is being introduced
- **`gateway`**
- Latency may be caused by the time taken for the gateway to receive the entire request from the client. There are a multitude of reasons this can occur, so communication with the user may be necessary. For example:
@@ -201,6 +201,7 @@ How to **investigate**:
- There could be a problem with authentication (eg. slow to run auth layer)
- **`distributor`**
- Typically, distributor p99 latency is in the range 50-100ms. If the distributor latency is higher than this, you may need to scale up the distributors.
+ - When using Mimir [ingest-storage](#mimir-ingest-storage-experimental), distributors write requests to a Kafka-compatible backend. Increased latency in the distributor may also come from this backend.
- **`ingester`**
- Typically, ingester p99 latency is in the range 5-50ms. If the ingester latency is higher than this, you should investigate the root cause before scaling up ingesters.
- Check out the following alerts and fix them if firing:
@@ -243,6 +244,9 @@ How to **investigate**:
- If queries are not waiting in queue
- Consider [enabling query sharding]({{< relref "../../references/architecture/query-sharding#how-to-enable-query-sharding" >}}) if not already enabled, to increase query parallelism
- If query sharding already enabled, consider increasing total number of query shards (`query_sharding_total_shards`) for tenants submitting slow queries, so their queries can be further parallelized
+ - **`ingester`**
+ - Check whether ingesters are overloaded. If they are and you can scale them up vertically, that may be the best action. If that's not possible, scaling horizontally can help as well, but it can take several hours for ingesters to fully redistribute their series.
+ - When using [ingest-storage](#mimir-ingest-storage-experimental), check the ratio of queries using strong consistency, and the latency of those queries.
#### Alertmanager
@@ -278,6 +282,7 @@ How to **investigate**:
- If the failing service is crashing / panicking: look for the stack trace in the logs and investigate from there
- If crashing service is query-frontend, querier or store-gateway, and you have "activity tracker" feature enabled, look for `found unfinished activities from previous run` message and subsequent `activity` messages in the log file to see which queries caused the crash.
- When using Memberlist as KV store for hash rings, ensure that Memberlist is working correctly. See instructions for the [`MimirGossipMembersTooHigh`](#MimirGossipMembersTooHigh) and [`MimirGossipMembersTooLow`](#MimirGossipMembersTooLow) alerts.
+- When using [ingest-storage](#mimir-ingest-storage-experimental) and distributors are failing to write requests to Kafka, make sure that Kafka is up and running correctly.
#### Alertmanager
@@ -672,10 +677,6 @@ How to **investigate**:
- Ensure the compactor is successfully running
- Look for any error in the compactor logs
-### MimirQueriesIncorrect
-
-_TODO: this runbook has not been written yet._
-
### MimirInconsistentRuntimeConfig
This alert fires if multiple replicas of the same Mimir service are using a different runtime config for a longer period of time.
@@ -1307,6 +1308,117 @@ How to **investigate** and **fix** it:
- Check the number of in-memory series shown on the `Mimir / Tenants` dashboard for an approximation of the number of series that will be compacted once these blocks are shipped from ingesters.
- Check the configured `compactor_split_and_merge_shards` for the tenant. A reasonable rule of thumb is 8-10 million series per compactor shard - if the number of series per shard is above this range, increase `compactor_split_and_merge_shards` for the affected tenant(s) accordingly.
+## Mimir ingest storage (experimental)
+
+This section contains runbooks for alerts related to experimental Mimir ingest storage.
+In this context, any reference to Kafka means a Kafka protocol-compatible backend.
+
+### MimirIngesterLastConsumedOffsetCommitFailed
+
+This alert fires when an ingester is failing to commit the last consumed offset to the Kafka backend.
+
+How it **works**:
+
+- The ingester ingests data (metrics, exemplars, ...) from Kafka and periodically commits the last consumed offset back to Kafka.
+- At startup, an ingester reads the last consumed offset committed to Kafka and resumes the consumption from there.
+- If the ingester fails to commit the last consumed offset to Kafka, the ingester keeps consuming correctly (assuming there's no other ongoing issue in the cluster), but after a restart it resumes consumption from the last successfully committed offset. If that offset was committed several minutes ago, the ingester re-ingests data that has already been ingested, potentially causing out-of-order errors, wasting resources and taking longer to start up.
+
+How to **investigate**:
+
+- Check ingester logs to find details about the error.
+- Check Kafka logs and health.
+
+### MimirIngesterFailedToReadRecordsFromKafka
+
+This alert fires when an ingester is failing to read records from the Kafka backend.
+
+How it **works**:
+
+- The ingester connects to Kafka brokers and reads records from them. Records contain write requests committed by distributors.
+- When the ingester fails to read more records from Kafka due to an error, it logs the error.
+- This can be normal if Kafka brokers are restarting; however, if read errors continue for some time, the alert is raised.
+
+How to **investigate**:
+
+- Check ingester logs to find details about the error.
+- Check Kafka logs and health.
+
+### MimirIngesterKafkaFetchErrorsRateTooHigh
+
+This alert fires when an ingester is receiving errors instead of "fetches" from Kafka.
+
+How it **works**:
+
+- The ingester uses a Kafka client to read records (containing write requests) from Kafka.
+- The Kafka client can return errors instead of more records.
+- If the rate of returned errors compared to returned records is too high, the alert is raised.
+- The Kafka client can return the errors [documented in the source code](https://github.com/grafana/mimir/blob/24591ae56cd7d6ef24a7cc1541a41405676773f4/vendor/github.com/twmb/franz-go/pkg/kgo/record_and_fetch.go#L332-L366).
+
+How to **investigate**:
+
+- Check ingester logs to find details about the error.
+- Check Kafka logs and health.
+
+### MimirStartingIngesterKafkaReceiveDelayIncreasing
+
+This alert fires when the "receive delay" reported by an ingester during its "starting" phase is not decreasing.
+
+How it **works**:
+
+- When an ingester is starting, it needs to fetch and process records from Kafka until the preconfigured consumption lag is honored. The maximum tolerated lag before an ingester is considered to have caught up reading from a partition at startup can be configured via `-ingest-storage.kafka.max-consumer-lag-at-startup` (see the configuration sketch at the end of this runbook).
+- Each record carries the timestamp of when it was sent to Kafka by the distributor. When the ingester reads a record, it computes the "receive delay" as the difference between the current time (when the record was read) and the time when the record was sent to Kafka. This receive delay is reported in the metric `cortex_ingest_storage_reader_receive_delay_seconds`. You can see the receive delay on the `Mimir / Writes` dashboard, in the "Ingester (ingest storage – end-to-end latency)" section.
+- Under normal conditions, when the ingester processes records faster than new records appear, the receive delay should decrease until `-ingest-storage.kafka.max-consumer-lag-at-startup` is honored.
+- When the ingester is starting and the observed "receive delay" is increasing, the alert is raised.
+
+How to **investigate**:
+
+- Check whether the ingester is fast enough to process all the data in Kafka.
+
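+For reference, the startup lag threshold mentioned above can also be set in the Mimir configuration file. The following is a minimal sketch that assumes the YAML key path mirrors the CLI flag `-ingest-storage.kafka.max-consumer-lag-at-startup`; the value shown is only illustrative:
+
+```yaml
+ingest_storage:
+  kafka:
+    # Maximum tolerated lag before a starting ingester is considered to have caught up.
+    max_consumer_lag_at_startup: 15s
+```
+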
+### MimirRunningIngesterReceiveDelayTooHigh
+
+This alert fires when the "receive delay" reported by an ingester while it's running reaches the alert threshold.
+
+How it **works**:
+
+- After an ingester starts and catches up with records in Kafka, it switches to "running" mode.
+- In running mode, the ingester continues to process incoming records from Kafka and continues to report the "receive delay". See the [`MimirStartingIngesterKafkaReceiveDelayIncreasing`](#MimirStartingIngesterKafkaReceiveDelayIncreasing) runbook for details about this metric.
+- Under normal conditions, when the ingester is running and processing records faster than new records appear, the receive delay should be stable and low.
+- If the observed "receive delay" increases and reaches a certain threshold, the alert is raised.
+
+How to **investigate**:
+
+- Check whether the ingester is fast enough to process all the data in Kafka.
+- If ingesters are too slow, consider scaling them horizontally to spread incoming series across more ingesters.
+
+### MimirIngesterFailsToProcessRecordsFromKafka
+
+This alert fires when an ingester is unable to process incoming records from Kafka due to internal errors. If ingest-storage were not used, such push requests would end up with 5xx errors.
+
+How it **works**:
+
+- The ingester reads records from Kafka and processes them locally. Processing means unmarshalling the data and handling the write requests stored in the records.
+- Write requests can fail due to "client" or "server" errors. An example of a client error is a write that exceeds a tenant's series limit. An example of a server error is the ingester hitting an instance limit.
+- If requests keep failing due to server errors, this alert is raised.
+
+How to **investigate**:
+
+- Check ingester logs to see why requests are failing, and troubleshoot based on that.
+
+### MimirIngesterFailsEnforceStrongConsistencyOnReadPath
+
+This alert fires when too many read requests with strong consistency are failing.
+
+How it **works**:
+
+- When a read request asks for a strong-consistency guarantee, the ingester reads the last produced offset from Kafka and waits until the record with this offset has been consumed.
+- If the read request times out during this wait, it is counted as a failed strong-consistency request.
+- If requests keep failing because strong consistency cannot be enforced, this alert is raised.
+
+How to **investigate**:
+
+- Check the wait latency of strong-consistency requests on the `Mimir / Queries` dashboard.
+- Check whether the ingester needs to process too many records, and whether ingesters need to be scaled up (vertically or horizontally).
+
## Errors catalog
Mimir has some codified error IDs that you might see in HTTP responses or logs.
@@ -1365,6 +1477,53 @@ This non-critical error occurs when Mimir receives a write request that contains
The series containing such samples are skipped during ingestion, and valid series within the same request are ingested.
{{< /admonition >}}
+### err-mimir-native-histogram-count-mismatch
+
+This non-critical error occurs when Mimir receives a write request that contains a sample that is a native histogram
+where the bucket counts don't add up to the overall count recorded in the native histogram, provided that the overall
+sum is a regular float number.
+
+{{< admonition type="note" >}}
+The series containing such samples are skipped during ingestion, and valid series within the same request are ingested.
+{{< /admonition >}}
+
+### err-mimir-native-histogram-count-not-big-enough
+
+This non-critical error occurs when Mimir receives a write request that contains a sample that is a native histogram
+where the bucket counts add up to a higher number than the overall count recorded in the native histogram, provided
+that the overall sum is NaN (not a regular float number).
+
+{{< admonition type="note" >}}
+The series containing such samples are skipped during ingestion, and valid series within the same request are ingested.
+{{< /admonition >}}
+
+### err-mimir-native-histogram-negative-bucket-count
+
+This non-critical error occurs when Mimir receives a write request that contains a sample that is a native histogram
+where some bucket count is negative.
+
+{{< admonition type="note" >}}
+The series containing such samples are skipped during ingestion, and valid series within the same request are ingested.
+{{< /admonition >}}
+
+### err-mimir-native-histogram-span-negative-offset
+
+This non-critical error occurs when Mimir receives a write request that contains a sample that is a native histogram
+where a bucket span has a negative offset.
+
+{{< admonition type="note" >}}
+The series containing such samples are skipped during ingestion, and valid series within the same request are ingested.
+{{< /admonition >}}
+
+### err-mimir-native-histogram-spans-buckets-mismatch
+
+This non-critical error occurs when Mimir receives a write request that contains a sample that is a native histogram
+where the number of bucket counts does not agree with the number of buckets encoded in the bucket spans.
+
+{{< admonition type="note" >}}
+The series containing such samples are skipped during ingestion, and valid series within the same request are ingested.
+{{< /admonition >}}
+
### err-mimir-label-invalid
This non-critical error occurs when Mimir receives a write request that contains a series with an invalid label name.
@@ -1979,7 +2138,7 @@ How to manually upload blocks from ingesters to the bucket:
The blocks and WAL stored in the ingester persistent disk are the last fence of defence in case of an incident involving blocks not shipped to the bucket or corrupted blocks in the bucket. If the data integrity in the ingester's disk is at risk (eg. close to hit the TSDB retention period or close to reach max disk utilisation), you should freeze it taking a **disk snapshot**.
-To take a **GCP persistent disk snapshot**:
+To take a **GCP Persistent Disk snapshot**:
1. Identify the Kubernetes PVC volume name (`kubectl get pvc --namespace <namespace>`) of the volumes to snapshot
2. For each volume, [create a snapshot](https://console.cloud.google.com/compute/snapshotsAdd) from the GCP console ([documentation](https://cloud.google.com/compute/docs/disks/create-snapshots))
@@ -2102,15 +2261,28 @@ After this preparation, one can use `kubectl exec --tty=false --stdin=false clon
# Project ID: your google project ID
```
-### Deleting a StatefulSet with persistent volumes
+### Deleting or scaling a StatefulSet with persistent volumes
+
+When you delete or scale down a Kubernetes StatefulSet whose pods have persistent volume claims (PVCs), the unused PVCs are not automatically deleted by default.
+This means that if the StatefulSet is recreated or scaled back up, the pods for which a PVC already exists get their previously used volumes mounted again.
-When you delete a Kubernetes StatefulSet whose pods have persistent volume claims (PVC), the PVCs are not automatically deleted. This means that if the StatefulSet is recreated, the pods for which there was already a PVC will get the volume mounted previously.
+However, this behaviour can be changed [as of Kubernetes 1.27](https://kubernetes.io/blog/2023/05/04/kubernetes-1-27-statefulset-pvc-auto-deletion-beta/).
+If `spec.persistentVolumeClaimRetentionPolicy.whenScaled` is set to `Delete`, unused PVCs will be deleted when the StatefulSet is scaled down.
+Similarly, if `spec.persistentVolumeClaimRetentionPolicy.whenDeleted` is set to `Delete`, all PVCs will be deleted when the StatefulSet is deleted.
+Note that neither of these behaviours applies when a StatefulSet is scaled up, a rolling update is performed, or pods are shifted between nodes.
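+As a sketch, the retention policy is part of the StatefulSet spec itself (field names are from the Kubernetes `apps/v1` API; only the relevant fragment is shown):
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+spec:
+  persistentVolumeClaimRetentionPolicy:
+    whenScaled: Delete  # delete the PVCs of removed pods when scaling down
+    whenDeleted: Delete # delete all PVCs when the StatefulSet is deleted
+```
+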
-A PVC can be manually deleted by an operator. When a PVC claim is deleted, what happens to the volume depends on its [Reclaim Policy](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming):
+When a PVC is deleted, what happens to the persistent volume (PV) it is bound to depends on its [reclaim policy](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming):
-- `Retain`: the volume will not be deleted until the PV resource will be manually deleted from Kubernetes
+- `Retain`: the volume will not be deleted automatically, and will need to be manually deleted
- `Delete`: the volume will be automatically deleted
+The initial reclaim policy for a PV is defined by its associated storage class.
+However, once the PV has been created, the PV's reclaim policy can be changed at any time, allowing it to be retained for further examination after the PVC has been deleted.
+For example, if the StatefulSet has `spec.persistentVolumeClaimRetentionPolicy.whenScaled` set to `Delete` and the PV has its reclaim policy set to `Delete`,
+but you wish to retain a PV for a pod that will be removed when scaling down the StatefulSet, you should change the affected PV's reclaim policy to `Retain` before scaling down the StatefulSet.
+
+To set a PV's reclaim policy to `Retain`, use `kubectl patch pv`: `kubectl patch pv <pv-name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'`
+
### Recover accidentally deleted blocks (Google Cloud specific)
_This runbook assumes you've enabled versioning in your GCS bucket and the retention of deleted blocks didn't expire yet._
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/alertmanager-resources/mimir-alertmanager-resources.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/alertmanager-resources/mimir-alertmanager-resources.png
index a03b4571b89..b4929341c21 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/alertmanager-resources/mimir-alertmanager-resources.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/alertmanager-resources/mimir-alertmanager-resources.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/alertmanager/mimir-alertmanager.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/alertmanager/mimir-alertmanager.png
index 50a8a285288..5b79f9b581f 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/alertmanager/mimir-alertmanager.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/alertmanager/mimir-alertmanager.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/compactor-resources/mimir-compactor-resources.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/compactor-resources/mimir-compactor-resources.png
index 8027628c8f8..b848001b030 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/compactor-resources/mimir-compactor-resources.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/compactor-resources/mimir-compactor-resources.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/compactor/mimir-compactor.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/compactor/mimir-compactor.png
index 01fd3dd4717..1e2810cd1c0 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/compactor/mimir-compactor.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/compactor/mimir-compactor.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/config/mimir-config.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/config/mimir-config.png
index 753dfa85515..f97bd3f374c 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/config/mimir-config.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/config/mimir-config.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/object-store/mimir-object-store.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/object-store/mimir-object-store.png
index 663cde06076..91c081952ee 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/object-store/mimir-object-store.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/object-store/mimir-object-store.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overrides/mimir-overrides.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overrides/mimir-overrides.png
index e4db9c56c52..433601973e9 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overrides/mimir-overrides.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overrides/mimir-overrides.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overview-networking/mimir-overview-networking.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overview-networking/mimir-overview-networking.png
index 779a82579ff..ab241a098c4 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overview-networking/mimir-overview-networking.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overview-networking/mimir-overview-networking.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overview-resources/mimir-overview-resources.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overview-resources/mimir-overview-resources.png
index 4ec2d1df9b6..fa882b44b95 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overview-resources/mimir-overview-resources.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overview-resources/mimir-overview-resources.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overview/mimir-overview.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overview/mimir-overview.png
index 4350580a589..0525c42ec6d 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overview/mimir-overview.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/overview/mimir-overview.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/queries/mimir-queries.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/queries/mimir-queries.png
index 19c72529283..48067775600 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/queries/mimir-queries.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/queries/mimir-queries.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/reads-networking/mimir-reads-networking.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/reads-networking/mimir-reads-networking.png
index 5d7c379b682..f83620ee50b 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/reads-networking/mimir-reads-networking.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/reads-networking/mimir-reads-networking.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/reads-resources/mimir-reads-resources.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/reads-resources/mimir-reads-resources.png
index 9ad0cf52a28..b678627505b 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/reads-resources/mimir-reads-resources.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/reads-resources/mimir-reads-resources.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/reads/mimir-reads.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/reads/mimir-reads.png
index c9afa6d2998..c481fd2e1c2 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/reads/mimir-reads.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/reads/mimir-reads.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/remote-ruler-reads-networking/index.md b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/remote-ruler-reads-networking/index.md
new file mode 100644
index 00000000000..da16e4876d1
--- /dev/null
+++ b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/remote-ruler-reads-networking/index.md
@@ -0,0 +1,19 @@
+---
+description: View an example Remote ruler reads networking dashboard.
+menuTitle: Remote ruler reads networking
+title: Grafana Mimir Remote ruler reads networking dashboard
+weight: 110
+---
+
+# Grafana Mimir Remote ruler reads networking dashboard
+
+The Remote ruler reads networking dashboard shows receive and transmit bandwidth, in-flight requests, and TCP connections for ruler query path components when remote operational mode is enabled.
+The dashboard isolates each service on the remote ruler read path into its own section and displays the order in which a read request flows.
+
+This dashboard requires [additional resources metrics]({{< relref "../../requirements#additional-resources-metrics" >}}).
+
+## Example
+
+The following example shows a Remote ruler reads networking dashboard from a demo cluster.
+
+![Grafana Mimir Remote ruler reads networking dashboard](mimir-remote-ruler-reads-networking.png)
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/remote-ruler-reads-networking/mimir-remote-ruler-reads-networking.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/remote-ruler-reads-networking/mimir-remote-ruler-reads-networking.png
new file mode 100644
index 00000000000..10046563cff
Binary files /dev/null and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/remote-ruler-reads-networking/mimir-remote-ruler-reads-networking.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/remote-ruler-reads-resources/mimir-remote-ruler-reads-resources.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/remote-ruler-reads-resources/mimir-remote-ruler-reads-resources.png
index ae4cefb442a..289564ccdf8 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/remote-ruler-reads-resources/mimir-remote-ruler-reads-resources.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/remote-ruler-reads-resources/mimir-remote-ruler-reads-resources.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/remote-ruler-reads/mimir-remote-ruler-reads.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/remote-ruler-reads/mimir-remote-ruler-reads.png
index 84ec7b7b8ad..2b77c52f2b3 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/remote-ruler-reads/mimir-remote-ruler-reads.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/remote-ruler-reads/mimir-remote-ruler-reads.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/rollout-progress/mimir-rollout-progress.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/rollout-progress/mimir-rollout-progress.png
index d17c9d51fbc..56348447542 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/rollout-progress/mimir-rollout-progress.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/rollout-progress/mimir-rollout-progress.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/ruler/mimir-ruler.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/ruler/mimir-ruler.png
index c6120250d90..d4ee25ec998 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/ruler/mimir-ruler.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/ruler/mimir-ruler.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/scaling/mimir-scaling.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/scaling/mimir-scaling.png
index 256dee2f890..5c3ee1f29b9 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/scaling/mimir-scaling.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/scaling/mimir-scaling.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/tenants/mimir-tenants.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/tenants/mimir-tenants.png
index 64a846d810d..0afaf81bce9 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/tenants/mimir-tenants.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/tenants/mimir-tenants.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/writes-networking/mimir-writes-networking.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/writes-networking/mimir-writes-networking.png
index 5d60c35f5dd..390bd2b1780 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/writes-networking/mimir-writes-networking.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/writes-networking/mimir-writes-networking.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/writes-resources/mimir-writes-resources.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/writes-resources/mimir-writes-resources.png
index a556be873e7..9aef1058f09 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/writes-resources/mimir-writes-resources.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/writes-resources/mimir-writes-resources.png differ
diff --git a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/writes/mimir-writes.png b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/writes/mimir-writes.png
index bd9f6d4ffb0..4841628c1b4 100644
Binary files a/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/writes/mimir-writes.png and b/docs/sources/mimir/manage/monitor-grafana-mimir/dashboards/writes/mimir-writes.png differ
diff --git a/docs/sources/mimir/references/architecture/components/compactor/index.md b/docs/sources/mimir/references/architecture/components/compactor/index.md
index 105903e9567..019db1dbac0 100644
--- a/docs/sources/mimir/references/architecture/components/compactor/index.md
+++ b/docs/sources/mimir/references/architecture/components/compactor/index.md
@@ -126,11 +126,23 @@ The storage retention is disabled by default, and no data will be deleted from t
For more information, refer to [Configure metrics storage retention]({{< relref "../../../../configure/configure-metrics-storage-retention" >}}).
-## Compactor disk utilization
+## Compactor scratch storage volume
+
+Each compactor uses a storage device mounted at `-compactor.data-dir` to temporarily store:
+
+- files downloaded from object storage used as input to compaction
+- block files produced by the compactor to be uploaded to object storage
-The compactor needs to download blocks from the bucket to the local disk, and the compactor needs to store compacted blocks to the local disk before uploading them to the bucket. The largest tenants may need a lot of disk space.
+{{% admonition type="note" %}}
+While the compactor is a stateless service, it's recommended that you configure the compactor to store its temporary files somewhere other than the root volume. This avoids I/O contention with other workloads running on the system.
+Common volume types include a local SSD or a cloud provider's block storage service.
+
+In Kubernetes, run compactors as a StatefulSet so that each Pod has a dedicated volume.
+{{% /admonition %}}
+
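+The following is a minimal Kubernetes sketch of this setup. The image tag, volume size, and names are illustrative only; size the volume using the estimate in the next section.
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: mimir-compactor
+spec:
+  serviceName: mimir-compactor
+  replicas: 1
+  selector:
+    matchLabels:
+      app: mimir-compactor
+  template:
+    metadata:
+      labels:
+        app: mimir-compactor
+    spec:
+      containers:
+        - name: compactor
+          image: grafana/mimir:latest # use the same image tag as the rest of your cluster
+          args:
+            - -target=compactor
+            - -compactor.data-dir=/data
+          volumeMounts:
+            - name: data
+              mountPath: /data
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+      spec:
+        accessModes: ["ReadWriteOnce"]
+        resources:
+          requests:
+            storage: 300Gi
+```
+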
+## Compactor disk utilization
-Assuming `max_compaction_range_blocks_size` is the total block size for the largest tenant during the longest `-compactor.block-ranges` period, the expression that estimates the minimum disk space required is:
+Large tenants may require a lot of disk space. Assuming `max_compaction_range_blocks_size` is the total block size for the largest tenant during the longest `-compactor.block-ranges` period, the expression that estimates the minimum disk space required is:
```
compactor.compaction-concurrency * max_compaction_range_blocks_size * 2
diff --git a/docs/sources/mimir/send/native-histograms/_index.md b/docs/sources/mimir/send/native-histograms/_index.md
index 452deaea148..e21f68cb0e6 100644
--- a/docs/sources/mimir/send/native-histograms/_index.md
+++ b/docs/sources/mimir/send/native-histograms/_index.md
@@ -84,7 +84,7 @@ In Go, the `NativeHistogramBucketFactor` option sets an upper limit of the relat
Some of the resulting buckets for factor `1.1` rounded to two decimal places are:
-..., (0.84, 0.94], (0.92, 1], (1, 1.09], (1.09, 1.19], (1.19, 1.30], ...
+..., (0.84, 0.92], (0.92, 1], (1, 1.09], (1.09, 1.19], (1.19, 1.30], ...
..., (76.1, 83], (83, 91], (91, 99], ...
diff --git a/go.mod b/go.mod
index 64cc3adcb8f..b921418596a 100644
--- a/go.mod
+++ b/go.mod
@@ -16,11 +16,11 @@ require (
github.com/go-openapi/swag v0.22.9
github.com/gogo/protobuf v1.3.2
github.com/gogo/status v1.1.1
- github.com/golang/protobuf v1.5.3 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4
github.com/google/gopacket v1.1.19
github.com/gorilla/mux v1.8.1
- github.com/grafana/dskit v0.0.0-20240311184239-73feada6c0d7
+ github.com/grafana/dskit v0.0.0-20240403100540-1435abf0da58
github.com/grafana/e2e v0.1.2-0.20240118170847-db90b84177fc
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/json-iterator/go v1.1.12
@@ -44,7 +44,7 @@ require (
go.uber.org/atomic v1.11.0
go.uber.org/goleak v1.3.0
golang.org/x/crypto v0.21.0
- golang.org/x/net v0.22.0
+ golang.org/x/net v0.23.0
golang.org/x/sync v0.6.0
golang.org/x/time v0.5.0
google.golang.org/grpc v1.62.1
@@ -80,7 +80,7 @@ require (
go.opentelemetry.io/otel v1.24.0
go.opentelemetry.io/otel/trace v1.24.0
go.uber.org/multierr v1.11.0
- golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
+ golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8
google.golang.org/api v0.168.0
google.golang.org/protobuf v1.33.0
sigs.k8s.io/kustomize/kyaml v0.16.0
@@ -104,10 +104,11 @@ require (
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
github.com/pierrec/lz4/v4 v4.1.19 // indirect
+ github.com/pires/go-proxyproto v0.7.0 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
- k8s.io/apimachinery v0.29.2 // indirect
- k8s.io/client-go v0.29.2 // indirect
+ k8s.io/apimachinery v0.29.3 // indirect
+ k8s.io/client-go v0.29.3 // indirect
k8s.io/klog/v2 v2.120.1 // indirect
)
@@ -256,7 +257,7 @@ require (
)
// Using a fork of Prometheus with Mimir-specific changes.
-replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20240319094147-5a0ec4187ab5
+replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20240327215316-a97e07f28d7b
// Replace memberlist with our fork which includes some fixes that haven't been
// merged upstream yet:
diff --git a/go.sum b/go.sum
index 0b551f4df8d..4cf5591312f 100644
--- a/go.sum
+++ b/go.sum
@@ -405,8 +405,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -496,8 +496,8 @@ github.com/gosimple/slug v1.1.1 h1:fRu/digW+NMwBIP+RmviTK97Ho/bEj/C9swrCspN3D4=
github.com/gosimple/slug v1.1.1/go.mod h1:ER78kgg1Mv0NQGlXiDe57DpCyfbNywXXZ9mIorhxAf0=
github.com/grafana-tools/sdk v0.0.0-20220919052116-6562121319fc h1:PXZQA2WCxe85Tnn+WEvr8fDpfwibmEPgfgFEaC87G24=
github.com/grafana-tools/sdk v0.0.0-20220919052116-6562121319fc/go.mod h1:AHHlOEv1+GGQ3ktHMlhuTUwo3zljV3QJbC0+8o2kn+4=
-github.com/grafana/dskit v0.0.0-20240311184239-73feada6c0d7 h1:yd9yoNgEOtp8O0MbtqXoMVqr+ZbU4oZFE8a04z8WXFE=
-github.com/grafana/dskit v0.0.0-20240311184239-73feada6c0d7/go.mod h1:RpTvZ9nkdXqyQro5DULQHJl9B6vwvEj95Dk6WIXqTLQ=
+github.com/grafana/dskit v0.0.0-20240403100540-1435abf0da58 h1:ph674hL86kFIWcrqUCXW/D0RdSFu2ToIjqvzRnPAzPg=
+github.com/grafana/dskit v0.0.0-20240403100540-1435abf0da58/go.mod h1:HvSf3uf8Ps2vPpzHeAFyZTdUcbVr+Rxpq1xcx7J/muc=
github.com/grafana/e2e v0.1.2-0.20240118170847-db90b84177fc h1:BW+LjKJDz0So5LI8UZfW5neWeKpSkWqhmGjQFzcFfLM=
github.com/grafana/e2e v0.1.2-0.20240118170847-db90b84177fc/go.mod h1:JVmqPBe8A/pZWwRoJW5ZjyALeY5OXMzPl7LrVXOdZAI=
github.com/grafana/goautoneg v0.0.0-20231010094147-47ce5e72a9ae h1:Yxbw9jKGJVC6qAK5Ubzzb/qZwM6rRMMqaDc/d4Vp3pM=
@@ -506,8 +506,8 @@ github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wp
github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU=
github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU=
github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/grafana/mimir-prometheus v0.0.0-20240319094147-5a0ec4187ab5 h1:4QJcg+4y8r2nzDbnUe19n9umUbJNRve3HLQFbW+qlac=
-github.com/grafana/mimir-prometheus v0.0.0-20240319094147-5a0ec4187ab5/go.mod h1:8ChS3ZH+BHOQvOHdBQJykzvuCcPyCPkCSxEGtOOoQU0=
+github.com/grafana/mimir-prometheus v0.0.0-20240327215316-a97e07f28d7b h1:676ls3Ic96j2TbIQQ9ZE1KA2cyCd7B3edaR2vBv5pvs=
+github.com/grafana/mimir-prometheus v0.0.0-20240327215316-a97e07f28d7b/go.mod h1:b4OU/wbfHR/BeRhxP05ziVQXH4uDApBt4MQjpzaN3No=
github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956 h1:em1oddjXL8c1tL0iFdtVtPloq2hRPen2MJQKoAWpxu0=
github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
github.com/grafana/pyroscope-go/godeltaprof v0.1.6 h1:nEdZ8louGAplSvIJi1HVp7kWvFvdiiYg3COLlTwJiFo=
@@ -661,8 +661,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
-github.com/linode/linodego v1.29.0 h1:gDSQWAbKMAQX8db9FDCXHhodQPrJmLcmthjx6m+PyV4=
-github.com/linode/linodego v1.29.0/go.mod h1:3k6WvCM10gillgYcnoLqIL23ST27BD9HhMsCJWb3Bpk=
+github.com/linode/linodego v1.30.0 h1:6HJli+LX7NGu+Sne2G+ux790EkVOWOV/SR4mK3jcs6k=
+github.com/linode/linodego v1.30.0/go.mod h1:/46h/XpmWi//oSA92GX2p3FIxb8HbX7grslPPQalR2o=
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -758,6 +758,8 @@ github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko
github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
github.com/pierrec/lz4/v4 v4.1.19 h1:tYLzDnjDXh9qIxSTKHwXwOYmm9d887Y7Y1ZkyXYHAN4=
github.com/pierrec/lz4/v4 v4.1.19/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwyq0Hs=
+github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -987,8 +989,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
-golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
+golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw=
+golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -1072,8 +1074,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
-golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1524,12 +1526,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A=
-k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0=
-k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8=
-k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU=
-k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg=
-k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA=
+k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw=
+k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80=
+k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
+k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
+k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
+k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
diff --git a/integration/backward_compatibility.go b/integration/backward_compatibility.go
index eb10d87a138..a307d192a14 100644
--- a/integration/backward_compatibility.go
+++ b/integration/backward_compatibility.go
@@ -11,12 +11,11 @@ var DefaultPreviousVersionImages = map[string]e2emimir.FlagMapper{
e2emimir.SetFlagMapper(map[string]string{"-ingester.ring.readiness-check-ring-health": "false"}),
e2emimir.RemoveFlagMapper([]string{"-ingester.native-histograms-ingestion-enabled"}),
),
- "grafana/mimir:2.4.0": e2emimir.RemoveFlagMapper([]string{"-ingester.native-histograms-ingestion-enabled"}),
- "grafana/mimir:2.5.0": e2emimir.RemoveFlagMapper([]string{"-ingester.native-histograms-ingestion-enabled"}),
"grafana/mimir:2.6.0": e2emimir.RemoveFlagMapper([]string{"-ingester.native-histograms-ingestion-enabled"}),
"grafana/mimir:2.7.1": e2emimir.NoopFlagMapper,
"grafana/mimir:2.8.0": e2emimir.NoopFlagMapper,
"grafana/mimir:2.9.1": e2emimir.NoopFlagMapper,
"grafana/mimir:2.10.0": e2emimir.NoopFlagMapper,
"grafana/mimir:2.11.0": e2emimir.NoopFlagMapper,
+ "grafana/mimir:2.12.0": e2emimir.NoopFlagMapper,
}
diff --git a/integration/e2emimir/services.go b/integration/e2emimir/services.go
index 945edaa9948..0184b0c362d 100644
--- a/integration/e2emimir/services.go
+++ b/integration/e2emimir/services.go
@@ -407,7 +407,7 @@ func WithConfigFile(configFile string) Option {
}
// WithNoopOption returns an option that doesn't change anything.
-func WithNoopOption() Option { return func(options *Options) {} }
+func WithNoopOption() Option { return func(*Options) {} }
// FlagMapper is the type of function that maps flags, just to reduce some verbosity.
type FlagMapper func(flags map[string]string) map[string]string
diff --git a/integration/kv_test.go b/integration/kv_test.go
index beedaf559ed..cf9b725919e 100644
--- a/integration/kv_test.go
+++ b/integration/kv_test.go
@@ -31,7 +31,7 @@ func TestKVList(t *testing.T) {
// Create keys to list back
keysToCreate := []string{"key-a", "key-b", "key-c"}
for _, key := range keysToCreate {
- err := client.CAS(context.Background(), key, func(in interface{}) (out interface{}, retry bool, err error) {
+ err := client.CAS(context.Background(), key, func(interface{}) (out interface{}, retry bool, err error) {
return key, false, nil
})
require.NoError(t, err, "could not create key")
@@ -53,7 +53,7 @@ func TestKVList(t *testing.T) {
func TestKVDelete(t *testing.T) {
testKVs(t, func(t *testing.T, client kv.Client, reg *prometheus.Registry) {
// Create a key
- err := client.CAS(context.Background(), "key-to-delete", func(in interface{}) (out interface{}, retry bool, err error) {
+ err := client.CAS(context.Background(), "key-to-delete", func(interface{}) (out interface{}, retry bool, err error) {
return "key-to-delete", false, nil
})
require.NoError(t, err, "object could not be created")
@@ -76,11 +76,11 @@ func TestKVDelete(t *testing.T) {
}
func TestKVWatchAndDelete(t *testing.T) {
- testKVs(t, func(t *testing.T, client kv.Client, reg *prometheus.Registry) {
+ testKVs(t, func(t *testing.T, client kv.Client, _ *prometheus.Registry) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- err := client.CAS(context.Background(), "key-before-watch", func(in interface{}) (out interface{}, retry bool, err error) {
+ err := client.CAS(context.Background(), "key-before-watch", func(interface{}) (out interface{}, retry bool, err error) {
return "value-before-watch", false, nil
})
require.NoError(t, err)
@@ -93,7 +93,7 @@ func TestKVWatchAndDelete(t *testing.T) {
w.watch(ctx, client)
}()
- err = client.CAS(context.Background(), "key-to-delete", func(in interface{}) (out interface{}, retry bool, err error) {
+ err = client.CAS(context.Background(), "key-to-delete", func(interface{}) (out interface{}, retry bool, err error) {
return "value-to-delete", false, nil
})
require.NoError(t, err, "object could not be created")
diff --git a/mimir-build-image/Dockerfile b/mimir-build-image/Dockerfile
index c0f28d7c6c7..d069b3fff67 100644
--- a/mimir-build-image/Dockerfile
+++ b/mimir-build-image/Dockerfile
@@ -4,8 +4,8 @@
# Provenance-includes-copyright: The Cortex Authors.
FROM registry.k8s.io/kustomize/kustomize:v5.3.0 as kustomize
-FROM alpine/helm:3.14.2 as helm
-FROM golang:1.21.8-bookworm
+FROM alpine/helm:3.14.3 as helm
+FROM golang:1.22.2-bookworm
ARG goproxyValue
ENV GOPROXY=${goproxyValue}
ENV SKOPEO_DEPS="libgpgme-dev libassuan-dev libbtrfs-dev libdevmapper-dev pkg-config"
@@ -47,7 +47,7 @@ RUN GO111MODULE=on \
go install github.com/golang/protobuf/protoc-gen-go@v1.3.1 && \
go install github.com/gogo/protobuf/protoc-gen-gogoslick@v1.3.0 && \
go install github.com/weaveworks/tools/cover@bdd647e92546027e12cdde3ae0714bb495e43013 && \
- go install github.com/fatih/faillint@v1.11.0 && \
+ go install github.com/fatih/faillint@v1.12.0 && \
go install github.com/campoy/embedmd@v1.0.0 && \
go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@v0.4.0 && \
go install github.com/monitoring-mixins/mixtool/cmd/mixtool@bca3066 && \
diff --git a/operations/compare-helm-with-jsonnet/plugins/resolve-config/main.go b/operations/compare-helm-with-jsonnet/plugins/resolve-config/main.go
index da0a6257500..43dee755be7 100644
--- a/operations/compare-helm-with-jsonnet/plugins/resolve-config/main.go
+++ b/operations/compare-helm-with-jsonnet/plugins/resolve-config/main.go
@@ -114,7 +114,7 @@ func (c *ConfigExtractor) ResolveConfigs() ([]*yaml.RNode, error) {
return nil, err
}
- err = concurrency.ForEachJob(context.Background(), len(c.allItems), runtime.NumCPU(), func(ctx context.Context, idx int) error {
+ err = concurrency.ForEachJob(context.Background(), len(c.allItems), runtime.NumCPU(), func(_ context.Context, idx int) error {
pod, ok, err := extractPodSpec(c.allItems[idx])
if err != nil {
return err
diff --git a/operations/helm/charts/mimir-distributed/CHANGELOG.md b/operations/helm/charts/mimir-distributed/CHANGELOG.md
index 5a3c0066da5..d47abfacd19 100644
--- a/operations/helm/charts/mimir-distributed/CHANGELOG.md
+++ b/operations/helm/charts/mimir-distributed/CHANGELOG.md
@@ -19,10 +19,11 @@ This changelog is continued from `enterprise-metrics` after Grafana Enterprise M
All notable changes to this chart will be documented in this file.
Entries should be ordered as follows:
-- [CHANGE]
-- [FEATURE]
-- [ENHANCEMENT]
-- [BUGFIX]
+
+* [CHANGE]
+* [FEATURE]
+* [ENHANCEMENT]
+* [BUGFIX]
Entries should include a reference to the Pull Request that introduced the change.
@@ -43,10 +44,12 @@ Entries should include a reference to the Pull Request that introduced the chang
* [CHANGE] Remove `-server.grpc.keepalive.max-connection-age` and `-server.grpc.keepalive.max-connection-age-grace` from default config. The configuration is now applied directly to the distributor, fixing parity with jsonnet. #7269
* [CHANGE] Remove `-server.grpc.keepalive.max-connection-idle` from default config. The configuration is now applied directly to the distributor, fixing parity with jsonnet. #7298
* [CHANGE] Distributor: termination grace period increased from 60s to 100s.
-* [FEATURE] Added experimental feature for deploying [KEDA](https://keda.sh) ScaledObjects as part of the helm chart for the components: distributor, querier, query-frontend and ruler. #7282 #7392
+* [CHANGE] Memcached: Change default read timeout for chunks and index caches to `750ms` from `450ms`. #7778
+* [FEATURE] Added experimental feature for deploying [KEDA](https://keda.sh) ScaledObjects as part of the helm chart for the components: distributor, querier, query-frontend and ruler. #7282 #7392 #7431 #7679
* Autoscaling can be enabled via `distributor.kedaAutoscaling`, `ruler.kedaAutoscaling`, `query_frontend.kedaAutoscaling`, and `querier.kedaAutoscaling`.
* Global configuration of `prometheusAddress`, `pollingInterval` and `customHeaders` can be found in the `kedaAutoscaling` section.
- * Requires metamonitoring or custom installed Prometheus compatible solution, for more details on metamonitoring see [Monitor the health of your system](https://grafana.com/docs/helm-charts/mimir-distributed/latest/run-production-environment-with-helm/monitor-system-health/). See [grafana/mimir#7367](https://github.com/grafana/mimir/issues/7367) for a migration procedure.
+ * Requires metamonitoring or a custom-installed Prometheus-compatible solution; for more details on metamonitoring, see [Monitor the health of your system](https://grafana.com/docs/helm-charts/mimir-distributed/latest/run-production-environment-with-helm/monitor-system-health/).
+ * For migration, use the `preserveReplicas` option of each component. Enable this option when first enabling KEDA autoscaling for a component, to preserve the current number of replicas. After the autoscaler takes over and is ready to scale the component, the option can be disabled. Once disabled, the `replicas` field inside the component's deployment is ignored and the autoscaler manages the number of replicas.
* [FEATURE] Gateway: Allow to configure whether or not NGINX binds IPv6 via `gateway.nginx.config.enableIPv6`. #7421
* [ENHANCEMENT] Add `jaegerReporterMaxQueueSize` Helm value for all components where configuring `JAEGER_REPORTER_MAX_QUEUE_SIZE` makes sense, and override the Jaeger client's default value of 100 for components expected to generate many trace spans. #7068 #7086 #7259
* [ENHANCEMENT] Rollout-operator: upgraded to v0.13.0. #7469
@@ -58,6 +61,7 @@ Entries should include a reference to the Pull Request that introduced the chang
* [ENHANCEMENT] Make the PSP template configurable via `rbac.podSecurityPolicy`. #7190
* [ENHANCEMENT] Recording rules: add native histogram recording rules to `cortex_request_duration_seconds`. #7528
* [ENHANCEMENT] Make the port used in ServiceMonitor for kube-state-metrics configurable. #7507
+* [ENHANCEMENT] Produce a clearer error message when multiple X-Scope-OrgID headers are present. #7704
* [ENHANCEMENT] Dashboards: allow switching between using classic or native histograms in dashboards. #7627
* Overview dashboard, Status panel, `cortex_request_duration_seconds` metric.
* [BUGFIX] Metamonitoring: update dashboards to drop unsupported `step` parameter in targets. #7157
diff --git a/operations/helm/charts/mimir-distributed/Chart.lock b/operations/helm/charts/mimir-distributed/Chart.lock
index ae8835b2d4a..79438a8434e 100644
--- a/operations/helm/charts/mimir-distributed/Chart.lock
+++ b/operations/helm/charts/mimir-distributed/Chart.lock
@@ -4,9 +4,9 @@ dependencies:
version: 5.0.14
- name: grafana-agent-operator
repository: https://grafana.github.io/helm-charts
- version: 0.3.18
+ version: 0.3.19
- name: rollout-operator
repository: https://grafana.github.io/helm-charts
version: 0.14.0
-digest: sha256:fd653f2ba2f57fdbd6f428278d50e4023ae668356bd59f402d99908da0781354
-generated: "2024-03-11T04:57:44.724650917Z"
+digest: sha256:75efdd747392eb84047470b37fb55fc03d99c891cf9cbcf97463278e57e92b39
+generated: "2024-04-02T14:35:36.399712014Z"
diff --git a/operations/helm/charts/mimir-distributed/Chart.yaml b/operations/helm/charts/mimir-distributed/Chart.yaml
index 0a4b53713b4..eebe5bcdfe6 100644
--- a/operations/helm/charts/mimir-distributed/Chart.yaml
+++ b/operations/helm/charts/mimir-distributed/Chart.yaml
@@ -1,6 +1,6 @@
apiVersion: v2
-version: 5.3.0-weekly.281
-appVersion: r281
+version: 5.3.0-weekly.284
+appVersion: r284
description: "Grafana Mimir"
home: https://grafana.com/docs/helm-charts/mimir-distributed/latest/
icon: https://grafana.com/static/img/logos/logo-mimir.svg
@@ -14,7 +14,7 @@ dependencies:
condition: minio.enabled
- name: grafana-agent-operator
alias: grafana-agent-operator
- version: 0.3.18
+ version: 0.3.19
repository: https://grafana.github.io/helm-charts
condition: metaMonitoring.grafanaAgent.installOperator
- name: rollout-operator
diff --git a/operations/helm/charts/mimir-distributed/README.md b/operations/helm/charts/mimir-distributed/README.md
index aff97744e55..ae5fbfb797c 100644
--- a/operations/helm/charts/mimir-distributed/README.md
+++ b/operations/helm/charts/mimir-distributed/README.md
@@ -4,7 +4,7 @@ Helm chart for deploying [Grafana Mimir](https://grafana.com/docs/mimir/latest/)
For the full documentation, visit [Grafana mimir-distributed Helm chart documentation](https://grafana.com/docs/helm-charts/mimir-distributed/latest/).
-> **Note:** The documentation version is derived from the Helm chart version which is 5.3.0-weekly.281.
+> **Note:** The documentation version is derived from the Helm chart version which is 5.3.0-weekly.284.
When upgrading from Helm chart version 4.X, please see [Migrate the Helm chart from version 4.x to 5.0](https://grafana.com/docs/helm-charts/mimir-distributed/latest/migration-guides/migrate-helm-chart-4.x-to-5.0/).
When upgrading from Helm chart version 3.x, please see [Migrate from single zone to zone-aware replication with Helm](https://grafana.com/docs/helm-charts/mimir-distributed/latest/migration-guides/migrate-from-single-zone-with-helm/).
@@ -14,7 +14,7 @@ When upgrading from Helm chart version 2.1, please see [Upgrade the Grafana Mimi
# mimir-distributed
-![Version: 5.3.0-weekly.281](https://img.shields.io/badge/Version-5.3.0--weekly.281-informational?style=flat-square) ![AppVersion: r281](https://img.shields.io/badge/AppVersion-r281-informational?style=flat-square)
+![Version: 5.3.0-weekly.284](https://img.shields.io/badge/Version-5.3.0--weekly.284-informational?style=flat-square) ![AppVersion: r284](https://img.shields.io/badge/AppVersion-r284-informational?style=flat-square)
Grafana Mimir
@@ -25,7 +25,7 @@ Kubernetes: `^1.20.0-0`
| Repository | Name | Version |
|------------|------|---------|
| https://charts.min.io/ | minio(minio) | 5.0.14 |
-| https://grafana.github.io/helm-charts | grafana-agent-operator(grafana-agent-operator) | 0.3.18 |
+| https://grafana.github.io/helm-charts | grafana-agent-operator(grafana-agent-operator) | 0.3.19 |
| https://grafana.github.io/helm-charts | rollout_operator(rollout-operator) | 0.14.0 |
# Contributing and releasing
diff --git a/operations/helm/charts/mimir-distributed/ci/offline/keda-autoscaling-global-values.yaml b/operations/helm/charts/mimir-distributed/ci/offline/keda-autoscaling-global-values.yaml
index 51adc3a8d43..be0e8535787 100644
--- a/operations/helm/charts/mimir-distributed/ci/offline/keda-autoscaling-global-values.yaml
+++ b/operations/helm/charts/mimir-distributed/ci/offline/keda-autoscaling-global-values.yaml
@@ -8,6 +8,7 @@ kedaAutoscaling:
X-Scope-OrgID: tenant
distributor:
+ replicas: 5
kedaAutoscaling:
enabled: true
minReplicaCount: 1
@@ -16,6 +17,7 @@ distributor:
targetMemoryUtilizationPercentage: 80
ruler:
+ replicas: 5
kedaAutoscaling:
enabled: true
minReplicaCount: 1
@@ -24,6 +26,7 @@ ruler:
targetMemoryUtilizationPercentage: 80
querier:
+ replicas: 5
kedaAutoscaling:
enabled: true
minReplicaCount: 2
@@ -31,6 +34,7 @@ querier:
querySchedulerInflightRequestsThreshold: 6
query_frontend:
+ replicas: 5
kedaAutoscaling:
enabled: true
minReplicaCount: 1
diff --git a/operations/helm/charts/mimir-distributed/ci/offline/keda-autoscaling-values.yaml b/operations/helm/charts/mimir-distributed/ci/offline/keda-autoscaling-values.yaml
index bd079f820bb..e8466b79cb6 100644
--- a/operations/helm/charts/mimir-distributed/ci/offline/keda-autoscaling-values.yaml
+++ b/operations/helm/charts/mimir-distributed/ci/offline/keda-autoscaling-values.yaml
@@ -11,6 +11,7 @@ metaMonitoring:
distributor:
kedaAutoscaling:
enabled: true
+ preserveReplicas: true
minReplicaCount: 1
maxReplicaCount: 10
targetCPUUtilizationPercentage: 80
@@ -19,6 +20,7 @@ distributor:
ruler:
kedaAutoscaling:
enabled: true
+ preserveReplicas: true
minReplicaCount: 1
maxReplicaCount: 10
targetCPUUtilizationPercentage: 80
@@ -27,6 +29,7 @@ ruler:
querier:
kedaAutoscaling:
enabled: true
+ preserveReplicas: true
minReplicaCount: 2
maxReplicaCount: 10
querySchedulerInflightRequestsThreshold: 6
@@ -34,6 +37,7 @@ querier:
query_frontend:
kedaAutoscaling:
enabled: true
+ preserveReplicas: true
minReplicaCount: 1
maxReplicaCount: 10
targetCPUUtilizationPercentage: 80
diff --git a/operations/helm/charts/mimir-distributed/templates/_helpers.tpl b/operations/helm/charts/mimir-distributed/templates/_helpers.tpl
index 5890c4614eb..ba0000e8010 100644
--- a/operations/helm/charts/mimir-distributed/templates/_helpers.tpl
+++ b/operations/helm/charts/mimir-distributed/templates/_helpers.tpl
@@ -681,3 +681,19 @@ mimir.parseCPU takes 1 argument
{{- $value_string }}
{{- end -}}
{{- end -}}
+
+{{/*
+cpuToMilliCPU is used to convert Kubernetes CPU units to MilliCPU.
+The returned value is a string representation. If you need to do any math on it, please parse the string first.
+
+mimir.cpuToMilliCPU takes 1 argument
+ .value = the Kubernetes CPU request value
+*/}}
+{{- define "mimir.cpuToMilliCPU" -}}
+ {{- $value_string := .value | toString -}}
+ {{- if (hasSuffix "m" $value_string) -}}
+ {{ trimSuffix "m" $value_string -}}
+ {{- else -}}
+ {{- $value_string | float64 | mulf 1000 | toString }}
+ {{- end -}}
+{{- end -}}
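
As a reading aid, here is a minimal Go sketch of the conversion the new `mimir.cpuToMilliCPU` helper performs: values carrying the `m` suffix are already in milliCPU, anything else is treated as whole cores and multiplied by 1000. The function and package names below are illustrative only; the chart implements this purely in Go templates.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// cpuToMilliCPU mirrors the logic of the mimir.cpuToMilliCPU Helm helper:
// "500m" -> "500", "2" -> "2000", "0.25" -> "250".
// Illustrative sketch only; not part of the Mimir codebase.
func cpuToMilliCPU(value string) (string, error) {
	if strings.HasSuffix(value, "m") {
		return strings.TrimSuffix(value, "m"), nil
	}
	cores, err := strconv.ParseFloat(value, 64)
	if err != nil {
		return "", err
	}
	return strconv.FormatFloat(cores*1000, 'f', -1, 64), nil
}

func main() {
	for _, v := range []string{"500m", "2", "0.25"} {
		out, _ := cpuToMilliCPU(v)
		fmt.Printf("%s -> %s\n", v, out)
	}
}
```
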
diff --git a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml
index 7f844b70ba2..3fccf573b60 100644
--- a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml
+++ b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml
@@ -8,7 +8,7 @@ metadata:
{{- toYaml .Values.distributor.annotations | nindent 4 }}
namespace: {{ .Release.Namespace | quote }}
spec:
- {{- if not .Values.distributor.kedaAutoscaling.enabled }}
+ {{- if or (not .Values.distributor.kedaAutoscaling.enabled) (.Values.distributor.kedaAutoscaling.preserveReplicas) }}
# If replicas is not number (when using values file it's float64, when using --set arg it's int64) and is false (i.e. null) don't set it
{{- if or (or (kindIs "int64" .Values.distributor.replicas) (kindIs "float64" .Values.distributor.replicas)) (.Values.distributor.replicas) }}
replicas: {{ .Values.distributor.replicas }}
diff --git a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-so.yaml b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-so.yaml
index b0ec110dd91..a0680f8786c 100644
--- a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-so.yaml
+++ b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-so.yaml
@@ -27,7 +27,7 @@ spec:
query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="distributor",namespace="{{ .Release.Namespace }}"}[5m])) and max by (pod) (up{container="distributor",namespace="{{ .Release.Namespace }}"}) > 0)[15m:]) * 1000
serverAddress: {{ include "mimir.kedaPrometheusAddress" (dict "ctx" $) }}
{{- $cpu_request := dig "requests" "cpu" nil .Values.distributor.resources }}
- threshold: {{ mulf (include "mimir.parseCPU" (dict "value" $cpu_request)) (divf .Values.distributor.kedaAutoscaling.targetCPUUtilizationPercentage 100) | floor | int64 | quote }}
+ threshold: {{ mulf (include "mimir.cpuToMilliCPU" (dict "value" $cpu_request)) (divf .Values.distributor.kedaAutoscaling.targetCPUUtilizationPercentage 100) | floor | int64 | quote }}
{{- if .Values.kedaAutoscaling.customHeaders }}
customHeaders: {{ (include "mimir.lib.mapToCSVString" (dict "map" .Values.kedaAutoscaling.customHeaders)) | quote }}
{{- end }}
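
The ScaledObject threshold above is the component's CPU request converted to milliCPU and scaled by `targetCPUUtilizationPercentage`. A hedged worked example, assuming a `100m` distributor CPU request and the 80% target used by the keda-autoscaling CI values (the request value is inferred from the regenerated manifests, not stated in this diff):

```go
package main

import (
	"fmt"
	"math"
)

// scaledObjectCPUThreshold reproduces the arithmetic of the template:
// threshold = floor(milliCPURequest * targetCPUUtilizationPercentage / 100).
func scaledObjectCPUThreshold(milliCPURequest, targetCPUUtilizationPercentage float64) int64 {
	return int64(math.Floor(milliCPURequest * targetCPUUtilizationPercentage / 100))
}

func main() {
	// Assumed inputs: 100m CPU request, 80% target utilization.
	// With the old cores-based mimir.parseCPU helper this was 0.1*0.8,
	// which floored to 0; in milliCPU it becomes 80.
	fmt.Println(scaledObjectCPUThreshold(100, 80)) // 80
}
```

This is why the regenerated ScaledObjects later in this diff move from `threshold: "0"` to `threshold: "80"`.
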
diff --git a/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml
index 918b3b2b131..b9c6dbde6ae 100644
--- a/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml
+++ b/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml
@@ -8,7 +8,7 @@ metadata:
{{- toYaml .Values.querier.annotations | nindent 4 }}
namespace: {{ .Release.Namespace | quote }}
spec:
- {{- if not .Values.querier.kedaAutoscaling.enabled }}
+ {{- if or (not .Values.querier.kedaAutoscaling.enabled) (.Values.querier.kedaAutoscaling.preserveReplicas) }}
# If replicas is not number (when using values file it's float64, when using --set arg it's int64) and is false (i.e. null) don't set it
{{- if or (or (kindIs "int64" .Values.querier.replicas) (kindIs "float64" .Values.querier.replicas)) (.Values.querier.replicas) }}
replicas: {{ .Values.querier.replicas }}
diff --git a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml
index e0d1e629d7c..132ab1d681a 100644
--- a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml
+++ b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml
@@ -8,7 +8,7 @@ metadata:
{{- toYaml .Values.query_frontend.annotations | nindent 4 }}
namespace: {{ .Release.Namespace | quote }}
spec:
- {{- if not .Values.query_frontend.kedaAutoscaling.enabled }}
+ {{- if or (not .Values.query_frontend.kedaAutoscaling.enabled) (.Values.query_frontend.kedaAutoscaling.preserveReplicas) }}
# If replicas is not number (when using values file it's float64, when using --set arg it's int64) and is false (i.e. null) don't set it
{{- if or (or (kindIs "int64" .Values.query_frontend.replicas) (kindIs "float64" .Values.query_frontend.replicas)) (.Values.query_frontend.replicas) }}
replicas: {{ .Values.query_frontend.replicas }}
diff --git a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-so.yaml b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-so.yaml
index 3de4ac58e7b..2ed95021b96 100644
--- a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-so.yaml
+++ b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-so.yaml
@@ -27,7 +27,7 @@ spec:
query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="query-frontend",namespace="{{ .Release.Namespace }}"}[5m])) and max by (pod) (up{container="query-frontend",namespace="{{ .Release.Namespace }}"}) > 0)[15m:]) * 1000
serverAddress: {{ include "mimir.kedaPrometheusAddress" (dict "ctx" $) }}
{{- $cpu_request := dig "requests" "cpu" nil .Values.query_frontend.resources }}
- threshold: {{ mulf (include "mimir.parseCPU" (dict "value" $cpu_request)) (divf .Values.query_frontend.kedaAutoscaling.targetCPUUtilizationPercentage 100) | floor | int64 | quote }}
+ threshold: {{ mulf (include "mimir.cpuToMilliCPU" (dict "value" $cpu_request)) (divf .Values.query_frontend.kedaAutoscaling.targetCPUUtilizationPercentage 100) | floor | int64 | quote }}
{{- if .Values.kedaAutoscaling.customHeaders }}
customHeaders: {{ (include "mimir.lib.mapToCSVString" (dict "map" .Values.kedaAutoscaling.customHeaders)) | quote }}
{{- end }}
diff --git a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml
index 4d3ac294359..7169196bb13 100644
--- a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml
+++ b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml
@@ -9,7 +9,7 @@ metadata:
{{- toYaml .Values.ruler.annotations | nindent 4 }}
namespace: {{ .Release.Namespace | quote }}
spec:
- {{- if not .Values.ruler.kedaAutoscaling.enabled }}
+ {{- if or (not .Values.ruler.kedaAutoscaling.enabled) (.Values.ruler.kedaAutoscaling.preserveReplicas) }}
replicas: {{ .Values.ruler.replicas }}
{{- end }}
selector:
diff --git a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-so.yaml b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-so.yaml
index 19772c2b67a..670037e1a08 100644
--- a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-so.yaml
+++ b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-so.yaml
@@ -27,7 +27,7 @@ spec:
query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="ruler",namespace="{{ .Release.Namespace }}"}[5m])) and max by (pod) (up{container="ruler",namespace="{{ .Release.Namespace }}"}) > 0)[15m:]) * 1000
serverAddress: {{ include "mimir.kedaPrometheusAddress" (dict "ctx" $) }}
{{- $cpu_request := dig "requests" "cpu" nil .Values.ruler.resources }}
- threshold: {{ mulf (include "mimir.parseCPU" (dict "value" $cpu_request)) (divf .Values.ruler.kedaAutoscaling.targetCPUUtilizationPercentage 100) | floor | int64 | quote }}
+ threshold: {{ mulf (include "mimir.cpuToMilliCPU" (dict "value" $cpu_request)) (divf .Values.ruler.kedaAutoscaling.targetCPUUtilizationPercentage 100) | floor | int64 | quote }}
{{- if .Values.kedaAutoscaling.customHeaders }}
customHeaders: {{ (include "mimir.lib.mapToCSVString" (dict "map" .Values.kedaAutoscaling.customHeaders)) | quote }}
{{- end }}
diff --git a/operations/helm/charts/mimir-distributed/values.yaml b/operations/helm/charts/mimir-distributed/values.yaml
index a0edf1dcc53..1299b4a3120 100644
--- a/operations/helm/charts/mimir-distributed/values.yaml
+++ b/operations/helm/charts/mimir-distributed/values.yaml
@@ -34,7 +34,7 @@ image:
# -- Grafana Mimir container image repository. Note: for Grafana Enterprise Metrics use the value 'enterprise.image.repository'
repository: grafana/mimir
# -- Grafana Mimir container image tag. Note: for Grafana Enterprise Metrics use the value 'enterprise.image.tag'
- tag: r281-93e069f
+ tag: r284-1a35693
# -- Container pull policy - shared between Grafana Mimir and Grafana Enterprise Metrics
pullPolicy: IfNotPresent
# -- Optionally specify an array of imagePullSecrets - shared between Grafana Mimir and Grafana Enterprise Metrics
@@ -183,7 +183,7 @@ mimir:
memcached:
addresses: {{ include "mimir.chunksCacheAddress" . }}
max_item_size: {{ mul (index .Values "chunks-cache").maxItemMemory 1024 1024 }}
- timeout: 450ms
+ timeout: 750ms
max_idle_connections: 150
{{- end }}
{{- if index .Values "index-cache" "enabled" }}
@@ -192,7 +192,7 @@ mimir:
memcached:
addresses: {{ include "mimir.indexCacheAddress" . }}
max_item_size: {{ mul (index .Values "index-cache").maxItemMemory 1024 1024 }}
- timeout: 450ms
+ timeout: 750ms
max_idle_connections: 150
{{- end }}
{{- if index .Values "metadata-cache" "enabled" }}
@@ -773,6 +773,9 @@ distributor:
# See https://github.com/grafana/mimir/issues/7367 for more details on how to migrate to autoscaled resources without disruptions.
kedaAutoscaling:
enabled: false
+ # -- preserveReplicas gives you the option to migrate from non-autoscaled to autoscaled deployments without losing replicas. When set to true, the replica fields in the component will be left intact.
+ # For further details, see [helm: autoscaling migration procedure](https://github.com/grafana/mimir/issues/7367)
+ preserveReplicas: false
minReplicaCount: 1
maxReplicaCount: 10
targetCPUUtilizationPercentage: 100
@@ -1168,6 +1171,9 @@ ruler:
# See https://github.com/grafana/mimir/issues/7367 for more details on how to migrate to autoscaled resources without disruptions.
kedaAutoscaling:
enabled: false
+ # -- preserveReplicas gives you the option to migrate from non-autoscaled to autoscaled deployments without losing replicas. When set to true, the replica fields in the component will be left intact.
+ # For further details, see [helm: autoscaling migration procedure](https://github.com/grafana/mimir/issues/7367)
+ preserveReplicas: false
minReplicaCount: 1
maxReplicaCount: 10
targetCPUUtilizationPercentage: 100
@@ -1274,6 +1280,9 @@ querier:
# See https://github.com/grafana/mimir/issues/7367 for more details on how to migrate to autoscaled resources without disruptions.
kedaAutoscaling:
enabled: false
+ # -- preserveReplicas gives you the option to migrate from non-autoscaled to autoscaled deployments without losing replicas. When set to true, the replica fields in the component will be left intact.
+ # For further details, see [helm: autoscaling migration procedure](https://github.com/grafana/mimir/issues/7367)
+ preserveReplicas: false
minReplicaCount: 1
maxReplicaCount: 10
querySchedulerInflightRequestsThreshold: 12
@@ -1381,6 +1390,9 @@ query_frontend:
# See https://github.com/grafana/mimir/issues/7367 for more details on how to migrate to autoscaled resources without disruptions.
kedaAutoscaling:
enabled: false
+ # -- preserveReplicas gives you the option to migrate from non-autoscaled to autoscaled deployments without losing replicas. When set to true, the replica fields in the component will be left intact.
+ # For further details, see [helm: autoscaling migration procedure](https://github.com/grafana/mimir/issues/7367)
+ preserveReplicas: false
minReplicaCount: 1
maxReplicaCount: 10
targetCPUUtilizationPercentage: 75
@@ -1875,7 +1887,7 @@ memcached:
# -- Memcached Docker image repository
repository: memcached
# -- Memcached Docker image tag
- tag: 1.6.24-alpine
+ tag: 1.6.25-alpine
# -- Memcached Docker image pull policy
pullPolicy: IfNotPresent
@@ -1898,7 +1910,7 @@ memcachedExporter:
image:
repository: prom/memcached-exporter
- tag: v0.14.2
+ tag: v0.14.3
pullPolicy: IfNotPresent
resources:
@@ -2589,6 +2601,11 @@ nginx:
"" "{{ include "mimir.noAuthTenant" . }}";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
@@ -2599,6 +2616,10 @@ nginx:
auth_basic_user_file /etc/nginx/secrets/.htpasswd;
{{- end }}
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
@@ -2975,6 +2996,11 @@ gateway:
"" "{{ include "mimir.noAuthTenant" . }}";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen {{ include "mimir.serverHttpListenPort" . }};
@@ -2987,6 +3013,10 @@ gateway:
auth_basic_user_file /etc/nginx/secrets/.htpasswd;
{{- end }}
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
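
Both the nginx and gateway configurations above rely on nginx (1.23.0 and later) exposing repeated request headers as a single comma-joined value in `$http_*` variables: the `map` flags any `X-Scope-OrgID` value containing a comma, and the `if` rejects such requests with a 400, while the `|`-separated multi-tenant form stays allowed. A minimal Go sketch of the same check, assuming the already-joined header value as input:

```go
package main

import (
	"fmt"
	"regexp"
)

// multipleOrgIDPattern is the same pattern used by the nginx map block:
// a value matching ^.+,.+$ means more than one X-Scope-OrgID header was sent.
var multipleOrgIDPattern = regexp.MustCompile(`^.+,.+$`)

func hasMultipleOrgIDHeaders(joinedHeaderValue string) bool {
	return multipleOrgIDPattern.MatchString(joinedHeaderValue)
}

func main() {
	fmt.Println(hasMultipleOrgIDHeaders("tenant-1"))          // false
	fmt.Println(hasMultipleOrgIDHeaders("tenant-1,tenant-2")) // true -> nginx returns 400
	fmt.Println(hasMultipleOrgIDHeaders("tenant-1|tenant-2")) // false -> allowed multi-tenant form
}
```
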
@@ -3334,7 +3364,7 @@ enterprise:
# -- Grafana Enterprise Metrics container image repository. Note: for Grafana Mimir use the value 'image.repository'
repository: grafana/enterprise-metrics
# -- Grafana Enterprise Metrics container image tag. Note: for Grafana Mimir use the value 'image.tag'
- tag: r281-99ff9f91
+ tag: r284-05515399
# Note: pullPolicy and optional pullSecrets are set in toplevel 'image' section, not here
# In order to use Grafana Enterprise Metrics features, you will need to provide the contents of your Grafana Enterprise Metrics
@@ -3838,7 +3868,7 @@ gr-metricname-cache:
smoke_test:
image:
repository: grafana/mimir-continuous-test
- tag: r281-93e069f
+ tag: r284-1a35693
pullPolicy: IfNotPresent
tenantId: ""
extraArgs: {}
@@ -3858,7 +3888,7 @@ continuous_test:
replicas: 1
image:
repository: grafana/mimir-continuous-test
- tag: r281-93e069f
+ tag: r284-1a35693
pullPolicy: IfNotPresent
# Note: optional pullSecrets are set in toplevel 'image' section, not here
diff --git a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
index b7d5f6e6f11..057bb4747fc 100644
--- a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
+++ b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
@@ -50,7 +50,7 @@ spec:
secretName: tls-certs
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -81,7 +81,7 @@ spec:
name: tls-certs
readOnly: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
index 5fbbedc49d6..8250cd0ac58 100644
--- a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
+++ b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
@@ -50,7 +50,7 @@ spec:
secretName: tls-certs
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -81,7 +81,7 @@ spec:
name: tls-certs
readOnly: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
index a14e6514cd9..7a709b4bece 100644
--- a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
+++ b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
@@ -50,7 +50,7 @@ spec:
secretName: tls-certs
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -81,7 +81,7 @@ spec:
name: tls-certs
readOnly: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/mimir-config.yaml b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/mimir-config.yaml
index 1024b546eaa..e0195d7b3d2 100644
--- a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/mimir-config.yaml
+++ b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/mimir-config.yaml
@@ -57,14 +57,14 @@ data:
addresses: dns+enterprise-https-values-mimir-chunks-cache.citestns.svc:11211
max_idle_connections: 150
max_item_size: 1048576
- timeout: 450ms
+ timeout: 750ms
index_cache:
backend: memcached
memcached:
addresses: dns+enterprise-https-values-mimir-index-cache.citestns.svc:11211
max_idle_connections: 150
max_item_size: 5242880
- timeout: 450ms
+ timeout: 750ms
metadata_cache:
backend: memcached
memcached:
diff --git a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
index 0b5579bd6a9..c8687cdcd41 100644
--- a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
+++ b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
@@ -50,7 +50,7 @@ spec:
secretName: tls-certs
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -81,7 +81,7 @@ spec:
name: tls-certs
readOnly: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/gateway-nginx-values-generated/mimir-distributed/templates/gateway/nginx-configmap.yaml b/operations/helm/tests/gateway-nginx-values-generated/mimir-distributed/templates/gateway/nginx-configmap.yaml
index 7232166d425..54966777686 100644
--- a/operations/helm/tests/gateway-nginx-values-generated/mimir-distributed/templates/gateway/nginx-configmap.yaml
+++ b/operations/helm/tests/gateway-nginx-values-generated/mimir-distributed/templates/gateway/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-aggregation-cache/graphite-aggregation-cache-statefulset.yaml b/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-aggregation-cache/graphite-aggregation-cache-statefulset.yaml
index 67f1cd30fc3..5dd77dedb57 100644
--- a/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-aggregation-cache/graphite-aggregation-cache-statefulset.yaml
+++ b/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-aggregation-cache/graphite-aggregation-cache-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-metric-name-cache/graphite-metric-name-cache-statefulset.yaml b/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-metric-name-cache/graphite-metric-name-cache-statefulset.yaml
index 41e1b27769e..5923e4e4b73 100644
--- a/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-metric-name-cache/graphite-metric-name-cache-statefulset.yaml
+++ b/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-metric-name-cache/graphite-metric-name-cache-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml b/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml
index 83b78e75ccb..b23e91d2c7b 100644
--- a/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml
+++ b/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml
@@ -32,7 +32,7 @@ spec:
- metadata:
query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="distributor",namespace="citestns"}[5m])) and max by (pod) (up{container="distributor",namespace="citestns"}) > 0)[15m:]) * 1000
serverAddress: https://mimir.example.com/prometheus
- threshold: "0"
+ threshold: "80"
customHeaders: "X-Scope-OrgID=tenant"
type: prometheus
- metadata:
diff --git a/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
index b78350ce70b..09b799e2c04 100644
--- a/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
+++ b/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml b/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml
index 2a00d30d181..3ee0473e6fc 100644
--- a/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml
+++ b/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml
@@ -32,7 +32,7 @@ spec:
- metadata:
query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="query-frontend",namespace="citestns"}[5m])) and max by (pod) (up{container="query-frontend",namespace="citestns"}) > 0)[15m:]) * 1000
serverAddress: https://mimir.example.com/prometheus
- threshold: "0"
+ threshold: "80"
customHeaders: "X-Scope-OrgID=tenant"
type: prometheus
- metadata:
diff --git a/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml b/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml
index abcf4bd6cfd..e0b4a23d9d5 100644
--- a/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml
+++ b/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml
@@ -32,7 +32,7 @@ spec:
- metadata:
query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="ruler",namespace="citestns"}[5m])) and max by (pod) (up{container="ruler",namespace="citestns"}) > 0)[15m:]) * 1000
serverAddress: https://mimir.example.com/prometheus
- threshold: "0"
+ threshold: "80"
customHeaders: "X-Scope-OrgID=tenant"
type: prometheus
- metadata:
diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml
index 1b9ff4b4e88..70d0deb0e2c 100644
--- a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml
+++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml
@@ -32,7 +32,7 @@ spec:
- metadata:
query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="distributor",namespace="citestns"}[5m])) and max by (pod) (up{container="distributor",namespace="citestns"}) > 0)[15m:]) * 1000
serverAddress: https://mimir.example.com/prometheus
- threshold: "0"
+ threshold: "80"
customHeaders: "X-Scope-OrgID=tenant-1"
type: prometheus
- metadata:
diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
index 9582225c713..c8b0b9dc87d 100644
--- a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
+++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml
index 16ff3f5c46b..40f02242488 100644
--- a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml
+++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml
@@ -32,7 +32,7 @@ spec:
- metadata:
query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="query-frontend",namespace="citestns"}[5m])) and max by (pod) (up{container="query-frontend",namespace="citestns"}) > 0)[15m:]) * 1000
serverAddress: https://mimir.example.com/prometheus
- threshold: "0"
+ threshold: "80"
customHeaders: "X-Scope-OrgID=tenant-1"
type: prometheus
- metadata:
diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml
index cd9e4890805..61ab41da261 100644
--- a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml
+++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml
@@ -32,7 +32,7 @@ spec:
- metadata:
query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="ruler",namespace="citestns"}[5m])) and max by (pod) (up{container="ruler",namespace="citestns"}) > 0)[15m:]) * 1000
serverAddress: https://mimir.example.com/prometheus
- threshold: "0"
+ threshold: "80"
customHeaders: "X-Scope-OrgID=tenant-1"
type: prometheus
- metadata:
diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml
index 646e9990a51..46b16ee4f9c 100644
--- a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml
+++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml
@@ -14,6 +14,8 @@ metadata:
{}
namespace: "citestns"
spec:
+ # If replicas is not number (when using values file it's float64, when using --set arg it's int64) and is false (i.e. null) don't set it
+ replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mimir
diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml
index 2e3d24a64bc..3e8fcd5bbfe 100644
--- a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml
+++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml
@@ -32,7 +32,7 @@ spec:
- metadata:
query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="distributor",namespace="citestns"}[5m])) and max by (pod) (up{container="distributor",namespace="citestns"}) > 0)[15m:]) * 1000
serverAddress: http://keda-autoscaling-values-mimir-nginx.citestns.svc:80/prometheus
- threshold: "0"
+ threshold: "80"
type: prometheus
- metadata:
query: max_over_time(sum((sum by (pod) (container_memory_working_set_bytes{container="distributor",namespace="citestns"}) and max by (pod) (up{container="distributor",namespace="citestns"}) > 0) or vector(0))[15m:]) + sum(sum by (pod) (max_over_time(kube_pod_container_resource_requests{container="distributor",namespace="citestns", resource="memory"}[15m])) and max by (pod) (changes(kube_pod_container_status_restarts_total{container="distributor",namespace="citestns"}[15m]) > 0) and max by (pod) (kube_pod_container_status_last_terminated_reason{container="distributor",namespace="citestns", reason="OOMKilled"}) or vector(0))
diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
index 18178fcca95..544624d9930 100644
--- a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
+++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-dep.yaml
index 41b9fc28def..e0c3920da3e 100644
--- a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-dep.yaml
+++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-dep.yaml
@@ -14,6 +14,8 @@ metadata:
{}
namespace: "citestns"
spec:
+ # If replicas is not number (when using values file it's float64, when using --set arg it's int64) and is false (i.e. null) don't set it
+ replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: mimir
diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml
index fe61c27ef0d..ece0a8d3c2e 100644
--- a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml
+++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml
@@ -13,6 +13,8 @@ metadata:
{}
namespace: "citestns"
spec:
+ # If replicas is not number (when using values file it's float64, when using --set arg it's int64) and is false (i.e. null) don't set it
+ replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mimir
diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml
index 5e434fd2bd5..57532d1733a 100644
--- a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml
+++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml
@@ -32,7 +32,7 @@ spec:
- metadata:
query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="query-frontend",namespace="citestns"}[5m])) and max by (pod) (up{container="query-frontend",namespace="citestns"}) > 0)[15m:]) * 1000
serverAddress: http://keda-autoscaling-values-mimir-nginx.citestns.svc:80/prometheus
- threshold: "0"
+ threshold: "80"
type: prometheus
- metadata:
query: max_over_time(sum((sum by (pod) (container_memory_working_set_bytes{container="query-frontend",namespace="citestns"}) and max by (pod) (up{container="query-frontend",namespace="citestns"}) > 0) or vector(0))[15m:]) + sum(sum by (pod) (max_over_time(kube_pod_container_resource_requests{container="query-frontend",namespace="citestns", resource="memory"}[15m])) and max by (pod) (changes(kube_pod_container_status_restarts_total{container="query-frontend",namespace="citestns"}[15m]) > 0) and max by (pod) (kube_pod_container_status_last_terminated_reason{container="query-frontend",namespace="citestns", reason="OOMKilled"}) or vector(0))
diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml
index ccac1e7dc74..bca419e8d66 100644
--- a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml
+++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml
@@ -14,6 +14,7 @@ metadata:
{}
namespace: "citestns"
spec:
+ replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mimir
diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml
index 2f535b095cc..076c87793f6 100644
--- a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml
+++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml
@@ -32,7 +32,7 @@ spec:
- metadata:
query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="ruler",namespace="citestns"}[5m])) and max by (pod) (up{container="ruler",namespace="citestns"}) > 0)[15m:]) * 1000
serverAddress: http://keda-autoscaling-values-mimir-nginx.citestns.svc:80/prometheus
- threshold: "0"
+ threshold: "80"
type: prometheus
- metadata:
query: max_over_time(sum((sum by (pod) (container_memory_working_set_bytes{container="ruler",namespace="citestns"}) and max by (pod) (up{container="ruler",namespace="citestns"}) > 0) or vector(0))[15m:]) + sum(sum by (pod) (max_over_time(kube_pod_container_resource_requests{container="ruler",namespace="citestns", resource="memory"}[15m])) and max by (pod) (changes(kube_pod_container_status_restarts_total{container="ruler",namespace="citestns"}[15m]) > 0) and max by (pod) (kube_pod_container_status_last_terminated_reason{container="ruler",namespace="citestns", reason="OOMKilled"}) or vector(0))
diff --git a/operations/helm/tests/large-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml b/operations/helm/tests/large-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
index 79fd316791f..2aa5df540ee 100644
--- a/operations/helm/tests/large-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
+++ b/operations/helm/tests/large-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/large-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml b/operations/helm/tests/large-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
index 16b1eaef860..c76c19846aa 100644
--- a/operations/helm/tests/large-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
+++ b/operations/helm/tests/large-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/large-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml b/operations/helm/tests/large-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
index 76c15f5c486..48ccb400e16 100644
--- a/operations/helm/tests/large-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
+++ b/operations/helm/tests/large-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/large-values-generated/mimir-distributed/templates/mimir-config.yaml b/operations/helm/tests/large-values-generated/mimir-distributed/templates/mimir-config.yaml
index 11d240c4e36..1c6e804d9f9 100644
--- a/operations/helm/tests/large-values-generated/mimir-distributed/templates/mimir-config.yaml
+++ b/operations/helm/tests/large-values-generated/mimir-distributed/templates/mimir-config.yaml
@@ -28,14 +28,14 @@ data:
addresses: dns+large-values-mimir-chunks-cache.citestns.svc:11211
max_idle_connections: 150
max_item_size: 1048576
- timeout: 450ms
+ timeout: 750ms
index_cache:
backend: memcached
memcached:
addresses: dns+large-values-mimir-index-cache.citestns.svc:11211
max_idle_connections: 150
max_item_size: 5242880
- timeout: 450ms
+ timeout: 750ms
metadata_cache:
backend: memcached
memcached:
diff --git a/operations/helm/tests/large-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/large-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
index db73c8b824c..9abf42e08dd 100644
--- a/operations/helm/tests/large-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
+++ b/operations/helm/tests/large-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/helm/tests/large-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml b/operations/helm/tests/large-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
index 0d3b11feb06..af01bdf365e 100644
--- a/operations/helm/tests/large-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
+++ b/operations/helm/tests/large-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrole.yaml b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrole.yaml
index 229bbb0a4e9..93adc8b070f 100644
--- a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrole.yaml
+++ b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrole.yaml
@@ -9,8 +9,8 @@ metadata:
app.kubernetes.io/instance: metamonitoring-values
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: operator
- helm.sh/chart: grafana-agent-operator-0.3.18
- app.kubernetes.io/version: "0.40.2"
+ helm.sh/chart: grafana-agent-operator-0.3.19
+ app.kubernetes.io/version: "0.40.3"
rules:
- apiGroups: [monitoring.grafana.com]
resources:
diff --git a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrolebinding.yaml b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrolebinding.yaml
index 007b90c57f5..79b73f94058 100644
--- a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrolebinding.yaml
+++ b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrolebinding.yaml
@@ -9,8 +9,8 @@ metadata:
app.kubernetes.io/instance: metamonitoring-values
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: operator
- helm.sh/chart: grafana-agent-operator-0.3.18
- app.kubernetes.io/version: "0.40.2"
+ helm.sh/chart: grafana-agent-operator-0.3.19
+ app.kubernetes.io/version: "0.40.3"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
diff --git a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-deployment.yaml b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-deployment.yaml
index 6d1f0f58c8f..ccb92a14b23 100644
--- a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-deployment.yaml
+++ b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-deployment.yaml
@@ -9,8 +9,8 @@ metadata:
app.kubernetes.io/instance: metamonitoring-values
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: operator
- helm.sh/chart: grafana-agent-operator-0.3.18
- app.kubernetes.io/version: "0.40.2"
+ helm.sh/chart: grafana-agent-operator-0.3.19
+ app.kubernetes.io/version: "0.40.3"
spec:
replicas: 1
selector:
@@ -33,7 +33,7 @@ spec:
type: RuntimeDefault
containers:
- name: grafana-agent-operator
- image: "docker.io/grafana/agent-operator:v0.40.2"
+ image: "docker.io/grafana/agent-operator:v0.40.3"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
diff --git a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-serviceaccount.yaml b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-serviceaccount.yaml
index 0d7aee26954..2a1e18fb8fc 100644
--- a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-serviceaccount.yaml
+++ b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-serviceaccount.yaml
@@ -10,5 +10,5 @@ metadata:
app.kubernetes.io/instance: metamonitoring-values
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: operator
- helm.sh/chart: grafana-agent-operator-0.3.18
- app.kubernetes.io/version: "0.40.2"
+ helm.sh/chart: grafana-agent-operator-0.3.19
+ app.kubernetes.io/version: "0.40.3"
diff --git a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/tests/test-grafanaagent.yaml b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/tests/test-grafanaagent.yaml
index e00924299bf..5c4e563d52d 100644
--- a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/tests/test-grafanaagent.yaml
+++ b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/tests/test-grafanaagent.yaml
@@ -94,7 +94,7 @@ metadata:
"helm.sh/hook": test
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded,hook-failed
spec:
- image: "docker.io/grafana/agent:v0.40.2"
+ image: "docker.io/grafana/agent:v0.40.3"
logLevel: info
serviceAccountName: grafana-agent-test-sa
metrics:
diff --git a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/templates/metamonitoring/grafana-dashboards.yaml b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/templates/metamonitoring/grafana-dashboards.yaml
index 07113acfc27..e961fb482f9 100644
--- a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/templates/metamonitoring/grafana-dashboards.yaml
+++ b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/templates/metamonitoring/grafana-dashboards.yaml
@@ -22300,6 +22300,1070 @@ data:
# Source: mimir-distributed/templates/metamonitoring/grafana-dashboards.yaml
apiVersion: v1
kind: ConfigMap
+metadata:
+ name: mimir-remote-ruler-reads-networking-dashboard
+ namespace: citestns
+ labels:
+ app.kubernetes.io/name: mimir
+ app.kubernetes.io/instance: metamonitoring-values
+ app.kubernetes.io/managed-by: Helm
+ grafana_dashboard: "1"
+ annotations:
+ k8s-sidecar-target-directory: /tmp/dashboards/Mimir Dashboards
+data:
+ mimir-remote-ruler-reads-networking.json: |-
+ {
+ "__requires": [
+ {
+ "id": "grafana",
+ "name": "Grafana",
+ "type": "grafana",
+ "version": "8.0.0"
+ }
+ ],
+ "annotations": {
+ "list": [ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 1,
+ "hideControls": false,
+ "links": [
+ {
+ "asDropdown": true,
+ "icon": "external link",
+ "includeVars": true,
+ "keepTime": true,
+ "tags": [
+ "mimir"
+ ],
+ "targetBlank": false,
+ "title": "Mimir dashboards",
+ "type": "dashboards"
+ }
+ ],
+ "refresh": "10s",
+ "rows": [
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 1,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_receive_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Receive bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 2,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_transmit_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Transmit bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 3,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"})",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"})",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ }
+ ],
+ "title": "Inflight requests (per pod)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 4,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"}))",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"}))",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ },
+ {
+ "expr": "min(cortex_tcp_connections_limit{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"})",
+ "format": "time_series",
+ "legendFormat": "limit",
+ "legendLink": null
+ }
+ ],
+ "title": "TCP connections (per pod)",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Summary",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 5,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_receive_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-frontend.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Receive bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 6,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_transmit_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-frontend.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Transmit bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 7,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-frontend.*\"})",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-frontend.*\"})",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ }
+ ],
+ "title": "Inflight requests (per pod)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 8,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-frontend.*\"}))",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-frontend.*\"}))",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ },
+ {
+ "expr": "min(cortex_tcp_connections_limit{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-frontend.*\"})",
+ "format": "time_series",
+ "legendFormat": "limit",
+ "legendLink": null
+ }
+ ],
+ "title": "TCP connections (per pod)",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Ruler-query-frontend",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 9,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_receive_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-scheduler.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Receive bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 10,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_transmit_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-scheduler.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Transmit bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 11,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-scheduler.*\"})",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-scheduler.*\"})",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ }
+ ],
+ "title": "Inflight requests (per pod)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 12,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-scheduler.*\"}))",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-scheduler.*\"}))",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ },
+ {
+ "expr": "min(cortex_tcp_connections_limit{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-scheduler.*\"})",
+ "format": "time_series",
+ "legendFormat": "limit",
+ "legendLink": null
+ }
+ ],
+ "title": "TCP connections (per pod)",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Ruler-query-scheduler",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 13,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_receive_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-querier.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Receive bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 14,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_transmit_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-querier.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Transmit bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 15,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-querier.*\"})",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-querier.*\"})",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ }
+ ],
+ "title": "Inflight requests (per pod)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 16,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-querier.*\"}))",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-querier.*\"}))",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ },
+ {
+ "expr": "min(cortex_tcp_connections_limit{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-querier.*\"})",
+ "format": "time_series",
+ "legendFormat": "limit",
+ "legendLink": null
+ }
+ ],
+ "title": "TCP connections (per pod)",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Ruler-querier",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "mimir"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "default",
+ "value": "default"
+ },
+ "hide": 0,
+ "label": "Data source",
+ "name": "datasource",
+ "options": [ ],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "allValue": ".*",
+ "current": {
+ "text": "prod",
+ "value": "prod"
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": true,
+ "label": "cluster",
+ "multi": false,
+ "name": "cluster",
+ "options": [ ],
+ "query": "label_values(cortex_build_info, cluster)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "prod",
+ "value": "prod"
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": "namespace",
+ "multi": false,
+ "name": "namespace",
+ "options": [ ],
+ "query": "label_values(cortex_build_info{cluster=~\"$cluster\"}, namespace)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "utc",
+ "title": "Mimir / Remote ruler reads networking",
+ "uid": "9e8cfff65f91632f8a25981c6fe44bc9",
+ "version": 0
+ }
+---
+# Source: mimir-distributed/templates/metamonitoring/grafana-dashboards.yaml
+apiVersion: v1
+kind: ConfigMap
metadata:
name: mimir-remote-ruler-reads-resources-dashboard
namespace: citestns
@@ -22623,7 +23687,7 @@ data:
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-frontend (dedicated to ruler)",
+ "title": "Ruler-query-frontend",
"titleSize": "h6"
},
{
@@ -22905,7 +23969,7 @@ data:
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-scheduler (dedicated to ruler)",
+ "title": "Ruler-query-scheduler",
"titleSize": "h6"
},
{
@@ -23187,7 +24251,7 @@ data:
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Querier (dedicated to ruler)",
+ "title": "Ruler-querier",
"titleSize": "h6"
}
],
@@ -23757,7 +24821,7 @@ data:
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-frontend (dedicated to ruler)",
+ "title": "Ruler-query-frontend",
"titleSize": "h6"
},
{
@@ -24085,7 +25149,7 @@ data:
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-scheduler (dedicated to ruler)",
+ "title": "Ruler-query-scheduler",
"titleSize": "h6"
},
{
@@ -24247,7 +25311,7 @@ data:
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-scheduler Latency (Time in Queue) Breakout by Additional Queue Dimensions",
+ "title": "Ruler-query-scheduler Latency (Time in Queue) Breakout by Additional Queue Dimensions",
"titleSize": "h6"
},
{
@@ -24554,7 +25618,7 @@ data:
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Querier (dedicated to ruler)",
+ "title": "Ruler-querier",
"titleSize": "h6"
},
{
@@ -24647,7 +25711,7 @@ data:
"sort": "none"
}
},
- "span": 3,
+ "span": 6,
"targets": [
{
"expr": "max by (scaletargetref_name) (\n kube_horizontalpodautoscaler_spec_max_replicas{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"}\n # Add the scaletargetref_name label for readability\n + on (cluster, namespace, horizontalpodautoscaler) group_left (scaletargetref_name)\n 0*kube_horizontalpodautoscaler_info{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"}\n)\n",
@@ -24673,7 +25737,7 @@ data:
},
{
"datasource": "$datasource",
- "description": "### Scaling metric (CPU): Desired replicas\nThis panel shows the scaling metric exposed by KEDA divided by the target/threshold used.\nIt should represent the desired number of replicas, ignoring the min/max constraints applied later.\n\n",
+ "description": "### Autoscaler failures rate\nThe rate of failures in the KEDA custom metrics API server. Whenever an error occurs, the KEDA custom\nmetrics server is unable to query the scaling metric from Prometheus so the autoscaler woudln't work properly.\n\n",
"fieldConfig": {
"defaults": {
"custom": {
@@ -24708,7 +25772,68 @@ data:
"sort": "none"
}
},
- "span": 3,
+ "span": 6,
+ "targets": [
+ {
+ "expr": "sum by(cluster, namespace, scaler, metric, scaledObject) (\n label_replace(\n rate(keda_scaler_errors[$__rate_interval]),\n \"namespace\", \"$1\", \"exported_namespace\", \"(.+)\"\n )\n) +\non(cluster, namespace, metric, scaledObject) group_left\nlabel_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"} * 0,\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n ),\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n)\n",
+ "format": "time_series",
+ "legendFormat": "{{scaler}} failures",
+ "legendLink": null
+ }
+ ],
+ "title": "Autoscaler failures rate",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Ruler-querier - autoscaling",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "description": "### Scaling metric (CPU): Desired replicas\nThis panel shows the scaling metric exposed by KEDA divided by the target/threshold used.\nIt should represent the desired number of replicas, ignoring the min/max constraints applied later.\n\n",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 17,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 4,
"targets": [
{
"expr": "sum by (scaler) (\n label_replace(\n keda_scaler_metrics_value{cluster=~\"$cluster\", exported_namespace=~\"$namespace\", scaler=~\".*cpu.*\"},\n \"namespace\", \"$1\", \"exported_namespace\", \"(.*)\"\n )\n /\n on(cluster, namespace, scaledObject, metric) group_left label_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"},\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n ),\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n )\n)\n",
@@ -24746,7 +25871,7 @@ data:
},
"overrides": [ ]
},
- "id": 17,
+ "id": 18,
"links": [ ],
"options": {
"legend": {
@@ -24757,7 +25882,7 @@ data:
"sort": "none"
}
},
- "span": 3,
+ "span": 4,
"targets": [
{
"expr": "sum by (scaler) (\n label_replace(\n keda_scaler_metrics_value{cluster=~\"$cluster\", exported_namespace=~\"$namespace\", scaler=~\".*memory.*\"},\n \"namespace\", \"$1\", \"exported_namespace\", \"(.*)\"\n )\n /\n on(cluster, namespace, scaledObject, metric) group_left label_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"},\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n ),\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n )\n)\n",
@@ -24771,7 +25896,7 @@ data:
},
{
"datasource": "$datasource",
- "description": "### Autoscaler failures rate\nThe rate of failures in the KEDA custom metrics API server. Whenever an error occurs, the KEDA custom\nmetrics server is unable to query the scaling metric from Prometheus so the autoscaler woudln't work properly.\n\n",
+ "description": "### Scaling metric (in-flight queries): Desired replicas\nThis panel shows the scaling metric exposed by KEDA divided by the target/threshold used.\nIt should represent the desired number of replicas, ignoring the min/max constraints applied later.\n\n",
"fieldConfig": {
"defaults": {
"custom": {
@@ -24795,7 +25920,7 @@ data:
},
"overrides": [ ]
},
- "id": 18,
+ "id": 19,
"links": [ ],
"options": {
"legend": {
@@ -24806,16 +25931,16 @@ data:
"sort": "none"
}
},
- "span": 3,
+ "span": 4,
"targets": [
{
- "expr": "sum by(cluster, namespace, scaler, metric, scaledObject) (\n label_replace(\n rate(keda_scaler_errors[$__rate_interval]),\n \"namespace\", \"$1\", \"exported_namespace\", \"(.+)\"\n )\n) +\non(cluster, namespace, metric, scaledObject) group_left\nlabel_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"} * 0,\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n ),\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n)\n",
+ "expr": "sum by (scaler) (\n label_replace(\n keda_scaler_metrics_value{cluster=~\"$cluster\", exported_namespace=~\"$namespace\", scaler=~\".*queries.*\"},\n \"namespace\", \"$1\", \"exported_namespace\", \"(.*)\"\n )\n /\n on(cluster, namespace, scaledObject, metric) group_left label_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"},\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n ),\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n )\n)\n",
"format": "time_series",
- "legendFormat": "{{scaler}} failures",
+ "legendFormat": "{{ scaler }}",
"legendLink": null
}
],
- "title": "Autoscaler failures rate",
+ "title": "Scaling metric (in-flight queries): Desired replicas",
"type": "timeseries"
}
],
@@ -24823,7 +25948,7 @@ data:
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Ruler-Querier - autoscaling",
+ "title": "",
"titleSize": "h6"
}
],
@@ -29744,7 +30869,7 @@ data:
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Accross tenants",
+ "title": "Across tenants",
"titleSize": "h6"
},
{
@@ -33644,11 +34769,26 @@ data:
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -33770,11 +34910,26 @@ data:
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -33956,11 +35111,26 @@ data:
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -34142,11 +35312,26 @@ data:
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -34328,11 +35513,26 @@ data:
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -34454,11 +35654,26 @@ data:
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -34580,11 +35795,26 @@ data:
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -34706,11 +35936,26 @@ data:
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -34837,6 +36082,21 @@ data:
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
diff --git a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/templates/metamonitoring/mixin-alerts.yaml b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/templates/metamonitoring/mixin-alerts.yaml
index 8348e8d3ab5..941ecb1ce29 100644
--- a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/templates/metamonitoring/mixin-alerts.yaml
+++ b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/templates/metamonitoring/mixin-alerts.yaml
@@ -48,18 +48,6 @@ spec:
for: 15m
labels:
severity: warning
- - alert: MimirQueriesIncorrect
- annotations:
- message: |
- The Mimir cluster {{ $labels.cluster }}/{{ $labels.namespace }} is experiencing {{ printf "%.2f" $value }}% incorrect query results.
- runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimirqueriesincorrect
- expr: |
- 100 * sum by (cluster, namespace) (rate(test_exporter_test_case_result_total{result="fail"}[5m]))
- /
- sum by (cluster, namespace) (rate(test_exporter_test_case_result_total[5m])) > 1
- for: 15m
- labels:
- severity: warning
- alert: MimirInconsistentRuntimeConfig
annotations:
message: |
@@ -963,6 +951,95 @@ spec:
for: 1h
labels:
severity: critical
+ - name: mimir_ingest_storage_alerts
+ rules:
+ - alert: MimirIngesterLastConsumedOffsetCommitFailed
+ annotations:
+ message: Mimir {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} is failing to commit the last consumed offset.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterlastconsumedoffsetcommitfailed
+ expr: |
+ sum by(cluster, namespace, pod) (rate(cortex_ingest_storage_reader_offset_commit_failures_total[5m]))
+ /
+ sum by(cluster, namespace, pod) (rate(cortex_ingest_storage_reader_offset_commit_requests_total[5m]))
+ > 0.2
+ for: 15m
+ labels:
+ severity: critical
+ - alert: MimirIngesterFailedToReadRecordsFromKafka
+ annotations:
+ message: Mimir {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} is failing to read records from Kafka.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterfailedtoreadrecordsfromkafka
+ expr: |
+ sum by(cluster, namespace, pod, node_id) (rate(cortex_ingest_storage_reader_read_errors_total[1m]))
+ > 0
+ for: 5m
+ labels:
+ severity: critical
+ - alert: MimirIngesterKafkaFetchErrorsRateTooHigh
+ annotations:
+ message: Mimir {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} is receiving fetch errors when reading records from Kafka.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterkafkafetcherrorsratetoohigh
+ expr: |
+ sum by (cluster, namespace, pod) (rate (cortex_ingest_storage_reader_fetch_errors_total[5m]))
+ /
+ sum by (cluster, namespace, pod) (rate (cortex_ingest_storage_reader_fetches_total[5m]))
+ > 0.1
+ for: 15m
+ labels:
+ severity: critical
+ - alert: MimirStartingIngesterKafkaReceiveDelayIncreasing
+ annotations:
+ message: Mimir {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} in "starting" phase is not reducing consumption lag of write requests read
+ from Kafka.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimirstartingingesterkafkareceivedelayincreasing
+ expr: |
+ deriv((
+ sum by (cluster, namespace, pod) (rate(cortex_ingest_storage_reader_receive_delay_seconds_sum{phase="starting"}[1m]))
+ /
+ sum by (cluster, namespace, pod) (rate(cortex_ingest_storage_reader_receive_delay_seconds_count{phase="starting"}[1m]))
+ )[5m:1m]) > 0
+ for: 5m
+ labels:
+ severity: warning
+ - alert: MimirRunningIngesterReceiveDelayTooHigh
+ annotations:
+ message: Mimir {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} in "running" phase is too far behind in its consumption of write requests
+ from Kafka.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimirrunningingesterreceivedelaytoohigh
+ expr: |
+ (
+ sum by (cluster, namespace, pod) (rate(cortex_ingest_storage_reader_receive_delay_seconds_sum{phase="running"}[1m]))
+ /
+ sum by (cluster, namespace, pod) (rate(cortex_ingest_storage_reader_receive_delay_seconds_count{phase="running"}[1m]))
+ ) > (10 * 60)
+ for: 5m
+ labels:
+ severity: critical
+ - alert: MimirIngesterFailsToProcessRecordsFromKafka
+ annotations:
+ message: Mimir {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} fails to consume write requests read from Kafka due to internal errors.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterfailstoprocessrecordsfromkafka
+ expr: |
+ sum by (cluster, namespace, pod) (rate(cortex_ingest_storage_reader_records_failed_total{cause="server"}[1m])) > 0
+ for: 5m
+ labels:
+ severity: critical
+ - alert: MimirIngesterFailsEnforceStrongConsistencyOnReadPath
+ annotations:
+ message: Mimir {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} fails to enforce strong-consistency on read-path.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterfailsenforcestrongconsistencyonreadpath
+ expr: |
+ sum by (cluster, namespace, pod) (rate(cortex_ingest_storage_strong_consistency_failures_total[1m])) > 0
+ for: 5m
+ labels:
+ severity: critical
- name: mimir_continuous_test
rules:
- alert: MimirContinuousTestNotRunningOnWrites
diff --git a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
index 7ae0bdcda06..890effdf96b 100644
--- a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
+++ b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/helm/tests/scheduler-name-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/scheduler-name-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
index 46d4d608920..62667592c3b 100644
--- a/operations/helm/tests/scheduler-name-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
+++ b/operations/helm/tests/scheduler-name-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/helm/tests/small-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml b/operations/helm/tests/small-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
index 25a98e31967..da7139d1c0c 100644
--- a/operations/helm/tests/small-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
+++ b/operations/helm/tests/small-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/small-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml b/operations/helm/tests/small-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
index b2a9287eb00..69d2913980e 100644
--- a/operations/helm/tests/small-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
+++ b/operations/helm/tests/small-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/small-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml b/operations/helm/tests/small-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
index 448ff6558dc..56584709173 100644
--- a/operations/helm/tests/small-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
+++ b/operations/helm/tests/small-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/small-values-generated/mimir-distributed/templates/mimir-config.yaml b/operations/helm/tests/small-values-generated/mimir-distributed/templates/mimir-config.yaml
index ea2c7412726..ed802edbfd5 100644
--- a/operations/helm/tests/small-values-generated/mimir-distributed/templates/mimir-config.yaml
+++ b/operations/helm/tests/small-values-generated/mimir-distributed/templates/mimir-config.yaml
@@ -28,14 +28,14 @@ data:
addresses: dns+small-values-mimir-chunks-cache.citestns.svc:11211
max_idle_connections: 150
max_item_size: 1048576
- timeout: 450ms
+ timeout: 750ms
index_cache:
backend: memcached
memcached:
addresses: dns+small-values-mimir-index-cache.citestns.svc:11211
max_idle_connections: 150
max_item_size: 5242880
- timeout: 450ms
+ timeout: 750ms
metadata_cache:
backend: memcached
memcached:
diff --git a/operations/helm/tests/small-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/small-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
index 0af623dbcbc..8fdfc803cc8 100644
--- a/operations/helm/tests/small-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
+++ b/operations/helm/tests/small-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/helm/tests/small-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml b/operations/helm/tests/small-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
index ffffa7281d5..34a5700db41 100644
--- a/operations/helm/tests/small-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
+++ b/operations/helm/tests/small-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-cache/admin-cache-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-cache/admin-cache-statefulset.yaml
index a6c8836b174..5583fcd02d6 100644
--- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-cache/admin-cache-statefulset.yaml
+++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-cache/admin-cache-statefulset.yaml
@@ -47,7 +47,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits: null
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
index 468a2b5a992..efc0132b866 100644
--- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
+++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
@@ -47,7 +47,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits: null
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
index 48e0c046e0d..7c0f5cacdfd 100644
--- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
+++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
@@ -47,7 +47,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits: null
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
index 0c5fb05b64f..04e814bb9f2 100644
--- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
+++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
@@ -47,7 +47,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits: null
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/mimir-config.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/mimir-config.yaml
index ebbf7ac035a..eb1ac87caf8 100644
--- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/mimir-config.yaml
+++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/mimir-config.yaml
@@ -58,14 +58,14 @@ data:
addresses: dns+test-enterprise-configmap-values-mimir-chunks-cache.citestns.svc:11211
max_idle_connections: 150
max_item_size: 1048576
- timeout: 450ms
+ timeout: 750ms
index_cache:
backend: memcached
memcached:
addresses: dns+test-enterprise-configmap-values-mimir-index-cache.citestns.svc:11211
max_idle_connections: 150
max_item_size: 5242880
- timeout: 450ms
+ timeout: 750ms
metadata_cache:
backend: memcached
memcached:
diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
index a74d5e00a08..e70bdcc303c 100644
--- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
+++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
@@ -47,7 +47,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits: null
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/test-ingress-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/test-ingress-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
index f79d3672f95..e05a1ba8843 100644
--- a/operations/helm/tests/test-ingress-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
+++ b/operations/helm/tests/test-ingress-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
index cdd57106844..e902b606265 100644
--- a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
+++ b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
index 0212f3786a5..b360b6fa0c9 100644
--- a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
+++ b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
index 1607a2c7518..1963654cd16 100644
--- a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
+++ b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/mimir-config.yaml b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/mimir-config.yaml
index 4384daf03af..fb284cf4976 100644
--- a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/mimir-config.yaml
+++ b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/mimir-config.yaml
@@ -36,14 +36,14 @@ data:
addresses: dns+test-oss-k8s-1.25-values-mimir-chunks-cache.citestns.svc:11211
max_idle_connections: 150
max_item_size: 1048576
- timeout: 450ms
+ timeout: 750ms
index_cache:
backend: memcached
memcached:
addresses: dns+test-oss-k8s-1.25-values-mimir-index-cache.citestns.svc:11211
max_idle_connections: 150
max_item_size: 5242880
- timeout: 450ms
+ timeout: 750ms
metadata_cache:
backend: memcached
memcached:
diff --git a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
index 911aadde2c5..9321a46915c 100644
--- a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
+++ b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
index e82275bd425..3aef58c1906 100644
--- a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
+++ b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/test-oss-logical-multizone-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/test-oss-logical-multizone-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
index 6792cc78831..02723e816e8 100644
--- a/operations/helm/tests/test-oss-logical-multizone-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
+++ b/operations/helm/tests/test-oss-logical-multizone-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/helm/tests/test-oss-multizone-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/test-oss-multizone-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
index 15a9088a8ab..f6eef2e6299 100644
--- a/operations/helm/tests/test-oss-multizone-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
+++ b/operations/helm/tests/test-oss-multizone-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
index d96cd7c9698..535ea365851 100644
--- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
+++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml
@@ -47,7 +47,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits: null
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
index 0fb77caf6e9..9397079b1e1 100644
--- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
+++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml
@@ -47,7 +47,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits: null
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
index 2721ec824aa..34ae8d8d9db 100644
--- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
+++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml
@@ -47,7 +47,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits: null
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/mimir-config.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/mimir-config.yaml
index 8ae0650c741..43919352e22 100644
--- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/mimir-config.yaml
+++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/mimir-config.yaml
@@ -36,14 +36,14 @@ data:
addresses: dns+test-oss-values-mimir-chunks-cache.citestns.svc:11211
max_idle_connections: 150
max_item_size: 1048576
- timeout: 450ms
+ timeout: 750ms
index_cache:
backend: memcached
memcached:
addresses: dns+test-oss-values-mimir-index-cache.citestns.svc:11211
max_idle_connections: 150
max_item_size: 5242880
- timeout: 450ms
+ timeout: 750ms
metadata_cache:
backend: memcached
memcached:
diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
index d0d91a6f80f..09680dad95f 100644
--- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
+++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
index 980e06bad3d..3e055fa5c99 100644
--- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
+++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml
@@ -47,7 +47,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: memcached
- image: memcached:1.6.24-alpine
+ image: memcached:1.6.25-alpine
imagePullPolicy: IfNotPresent
resources:
limits: null
@@ -73,7 +73,7 @@ spec:
- ALL
readOnlyRootFilesystem: true
- name: exporter
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9150
diff --git a/operations/helm/tests/test-requests-and-limits-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/test-requests-and-limits-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
index 86b0a47579e..2728db92a27 100644
--- a/operations/helm/tests/test-requests-and-limits-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
+++ b/operations/helm/tests/test-requests-and-limits-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/helm/tests/test-vault-agent-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/test-vault-agent-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
index 21c25a0e989..5f2c2561c5d 100644
--- a/operations/helm/tests/test-vault-agent-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
+++ b/operations/helm/tests/test-vault-agent-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml
@@ -45,11 +45,20 @@ data:
"" "anonymous";
}
+ map $http_x_scope_orgid $has_multiple_orgid_headers {
+ default 0;
+ "~^.+,.+$" 1;
+ }
+
proxy_read_timeout 300;
server {
listen 8080;
listen [::]:8080;
+ if ($has_multiple_orgid_headers = 1) {
+ return 400 'Sending multiple X-Scope-OrgID headers is not allowed. Use a single header with | as separator instead.';
+ }
+
location = / {
return 200 'OK';
auth_basic off;
diff --git a/operations/mimir-mixin-compiled-baremetal/alerts.yaml b/operations/mimir-mixin-compiled-baremetal/alerts.yaml
index 48c05a9a69b..0efa8546bbb 100644
--- a/operations/mimir-mixin-compiled-baremetal/alerts.yaml
+++ b/operations/mimir-mixin-compiled-baremetal/alerts.yaml
@@ -36,18 +36,6 @@ groups:
for: 15m
labels:
severity: warning
- - alert: MimirQueriesIncorrect
- annotations:
- message: |
- The Mimir cluster {{ $labels.cluster }}/{{ $labels.namespace }} is experiencing {{ printf "%.2f" $value }}% incorrect query results.
- runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimirqueriesincorrect
- expr: |
- 100 * sum by (cluster, namespace) (rate(test_exporter_test_case_result_total{result="fail"}[5m]))
- /
- sum by (cluster, namespace) (rate(test_exporter_test_case_result_total[5m])) > 1
- for: 15m
- labels:
- severity: warning
- alert: MimirInconsistentRuntimeConfig
annotations:
message: |
@@ -938,6 +926,95 @@ groups:
for: 1h
labels:
severity: critical
+- name: mimir_ingest_storage_alerts
+ rules:
+ - alert: MimirIngesterLastConsumedOffsetCommitFailed
+ annotations:
+ message: Mimir {{ $labels.instance }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} is failing to commit the last consumed offset.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterlastconsumedoffsetcommitfailed
+ expr: |
+ sum by(cluster, namespace, instance) (rate(cortex_ingest_storage_reader_offset_commit_failures_total[5m]))
+ /
+ sum by(cluster, namespace, instance) (rate(cortex_ingest_storage_reader_offset_commit_requests_total[5m]))
+ > 0.2
+ for: 15m
+ labels:
+ severity: critical
+ - alert: MimirIngesterFailedToReadRecordsFromKafka
+ annotations:
+ message: Mimir {{ $labels.instance }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} is failing to read records from Kafka.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterfailedtoreadrecordsfromkafka
+ expr: |
+ sum by(cluster, namespace, instance, node_id) (rate(cortex_ingest_storage_reader_read_errors_total[1m]))
+ > 0
+ for: 5m
+ labels:
+ severity: critical
+ - alert: MimirIngesterKafkaFetchErrorsRateTooHigh
+ annotations:
+ message: Mimir {{ $labels.instance }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} is receiving fetch errors when reading records from Kafka.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterkafkafetcherrorsratetoohigh
+ expr: |
+ sum by (cluster, namespace, instance) (rate (cortex_ingest_storage_reader_fetch_errors_total[5m]))
+ /
+ sum by (cluster, namespace, instance) (rate (cortex_ingest_storage_reader_fetches_total[5m]))
+ > 0.1
+ for: 15m
+ labels:
+ severity: critical
+ - alert: MimirStartingIngesterKafkaReceiveDelayIncreasing
+ annotations:
+ message: Mimir {{ $labels.instance }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} in "starting" phase is not reducing consumption lag of write requests read
+ from Kafka.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimirstartingingesterkafkareceivedelayincreasing
+ expr: |
+ deriv((
+ sum by (cluster, namespace, instance) (rate(cortex_ingest_storage_reader_receive_delay_seconds_sum{phase="starting"}[1m]))
+ /
+ sum by (cluster, namespace, instance) (rate(cortex_ingest_storage_reader_receive_delay_seconds_count{phase="starting"}[1m]))
+ )[5m:1m]) > 0
+ for: 5m
+ labels:
+ severity: warning
+ - alert: MimirRunningIngesterReceiveDelayTooHigh
+ annotations:
+ message: Mimir {{ $labels.instance }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} in "running" phase is too far behind in its consumption of write requests
+ from Kafka.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimirrunningingesterreceivedelaytoohigh
+ expr: |
+ (
+ sum by (cluster, namespace, instance) (rate(cortex_ingest_storage_reader_receive_delay_seconds_sum{phase="running"}[1m]))
+ /
+ sum by (cluster, namespace, instance) (rate(cortex_ingest_storage_reader_receive_delay_seconds_count{phase="running"}[1m]))
+ ) > (10 * 60)
+ for: 5m
+ labels:
+ severity: critical
+ - alert: MimirIngesterFailsToProcessRecordsFromKafka
+ annotations:
+ message: Mimir {{ $labels.instance }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} fails to consume write requests read from Kafka due to internal errors.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterfailstoprocessrecordsfromkafka
+ expr: |
+ sum by (cluster, namespace, instance) (rate(cortex_ingest_storage_reader_records_failed_total{cause="server"}[1m])) > 0
+ for: 5m
+ labels:
+ severity: critical
+ - alert: MimirIngesterFailsEnforceStrongConsistencyOnReadPath
+ annotations:
+ message: Mimir {{ $labels.instance }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} fails to enforce strong-consistency on read-path.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterfailsenforcestrongconsistencyonreadpath
+ expr: |
+ sum by (cluster, namespace, instance) (rate(cortex_ingest_storage_strong_consistency_failures_total[1m])) > 0
+ for: 5m
+ labels:
+ severity: critical
- name: mimir_continuous_test
rules:
- alert: MimirContinuousTestNotRunningOnWrites
diff --git a/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-remote-ruler-reads-networking.json b/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-remote-ruler-reads-networking.json
new file mode 100644
index 00000000000..93ed8c36e98
--- /dev/null
+++ b/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-remote-ruler-reads-networking.json
@@ -0,0 +1,1048 @@
+{
+ "__requires": [
+ {
+ "id": "grafana",
+ "name": "Grafana",
+ "type": "grafana",
+ "version": "8.0.0"
+ }
+ ],
+ "annotations": {
+ "list": [ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 1,
+ "hideControls": false,
+ "links": [
+ {
+ "asDropdown": true,
+ "icon": "external link",
+ "includeVars": true,
+ "keepTime": true,
+ "tags": [
+ "mimir"
+ ],
+ "targetBlank": false,
+ "title": "Mimir dashboards",
+ "type": "dashboards"
+ }
+ ],
+ "refresh": "10s",
+ "rows": [
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 1,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(instance) (rate(node_network_receive_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{instance}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Receive bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 2,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(instance) (rate(node_network_transmit_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{instance}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Transmit bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 3,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"})",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"})",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ }
+ ],
+ "title": "Inflight requests (per pod)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 4,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(sum by(instance) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"}))",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(sum by(instance) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"}))",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ },
+ {
+ "expr": "min(cortex_tcp_connections_limit{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"})",
+ "format": "time_series",
+ "legendFormat": "limit",
+ "legendLink": null
+ }
+ ],
+ "title": "TCP connections (per pod)",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Summary",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 5,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(instance) (rate(node_network_receive_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-query-frontend.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{instance}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Receive bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 6,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(instance) (rate(node_network_transmit_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-query-frontend.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{instance}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Transmit bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 7,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-query-frontend.*\"})",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-query-frontend.*\"})",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ }
+ ],
+ "title": "Inflight requests (per pod)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 8,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(sum by(instance) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-query-frontend.*\"}))",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(sum by(instance) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-query-frontend.*\"}))",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ },
+ {
+ "expr": "min(cortex_tcp_connections_limit{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-query-frontend.*\"})",
+ "format": "time_series",
+ "legendFormat": "limit",
+ "legendLink": null
+ }
+ ],
+ "title": "TCP connections (per pod)",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Ruler-query-frontend",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 9,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(instance) (rate(node_network_receive_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-query-scheduler.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{instance}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Receive bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 10,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(instance) (rate(node_network_transmit_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-query-scheduler.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{instance}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Transmit bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 11,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-query-scheduler.*\"})",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-query-scheduler.*\"})",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ }
+ ],
+ "title": "Inflight requests (per pod)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 12,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(sum by(instance) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-query-scheduler.*\"}))",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(sum by(instance) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-query-scheduler.*\"}))",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ },
+ {
+ "expr": "min(cortex_tcp_connections_limit{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-query-scheduler.*\"})",
+ "format": "time_series",
+ "legendFormat": "limit",
+ "legendLink": null
+ }
+ ],
+ "title": "TCP connections (per pod)",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Ruler-query-scheduler",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 13,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(instance) (rate(node_network_receive_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-querier.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{instance}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Receive bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 14,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(instance) (rate(node_network_transmit_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-querier.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{instance}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Transmit bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 15,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-querier.*\"})",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-querier.*\"})",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ }
+ ],
+ "title": "Inflight requests (per pod)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 16,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(sum by(instance) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-querier.*\"}))",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(sum by(instance) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-querier.*\"}))",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ },
+ {
+ "expr": "min(cortex_tcp_connections_limit{cluster=~\"$cluster\", namespace=~\"$namespace\",instance=~\".*ruler-querier.*\"})",
+ "format": "time_series",
+ "legendFormat": "limit",
+ "legendLink": null
+ }
+ ],
+ "title": "TCP connections (per pod)",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Ruler-querier",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "mimir"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "default",
+ "value": "default"
+ },
+ "hide": 0,
+ "label": "Data source",
+ "name": "datasource",
+ "options": [ ],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "allValue": ".*",
+ "current": {
+ "text": "prod",
+ "value": "prod"
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": true,
+ "label": "cluster",
+ "multi": false,
+ "name": "cluster",
+ "options": [ ],
+ "query": "label_values(cortex_build_info, cluster)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "prod",
+ "value": "prod"
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": "namespace",
+ "multi": false,
+ "name": "namespace",
+ "options": [ ],
+ "query": "label_values(cortex_build_info{cluster=~\"$cluster\"}, namespace)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "utc",
+ "title": "Mimir / Remote ruler reads networking",
+ "uid": "9e8cfff65f91632f8a25981c6fe44bc9",
+ "version": 0
+ }
\ No newline at end of file
diff --git a/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-remote-ruler-reads-resources.json b/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-remote-ruler-reads-resources.json
index 6e9f534f182..27e49bc79f5 100644
--- a/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-remote-ruler-reads-resources.json
+++ b/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-remote-ruler-reads-resources.json
@@ -285,7 +285,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-frontend (dedicated to ruler)",
+ "title": "Ruler-query-frontend",
"titleSize": "h6"
},
{
@@ -543,7 +543,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-scheduler (dedicated to ruler)",
+ "title": "Ruler-query-scheduler",
"titleSize": "h6"
},
{
@@ -801,7 +801,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Querier (dedicated to ruler)",
+ "title": "Ruler-querier",
"titleSize": "h6"
}
],
diff --git a/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-remote-ruler-reads.json b/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-remote-ruler-reads.json
index 9cb3fbabb50..0d3e812b64e 100644
--- a/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-remote-ruler-reads.json
+++ b/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-remote-ruler-reads.json
@@ -445,7 +445,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-frontend (dedicated to ruler)",
+ "title": "Ruler-query-frontend",
"titleSize": "h6"
},
{
@@ -773,7 +773,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-scheduler (dedicated to ruler)",
+ "title": "Ruler-query-scheduler",
"titleSize": "h6"
},
{
@@ -935,7 +935,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-scheduler Latency (Time in Queue) Breakout by Additional Queue Dimensions",
+ "title": "Ruler-query-scheduler Latency (Time in Queue) Breakout by Additional Queue Dimensions",
"titleSize": "h6"
},
{
@@ -1242,7 +1242,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Querier (dedicated to ruler)",
+ "title": "Ruler-querier",
"titleSize": "h6"
},
{
@@ -1335,7 +1335,7 @@
"sort": "none"
}
},
- "span": 3,
+ "span": 6,
"targets": [
{
"expr": "max by (scaletargetref_name) (\n kube_horizontalpodautoscaler_spec_max_replicas{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"}\n # Add the scaletargetref_name label for readability\n + on (cluster, namespace, horizontalpodautoscaler) group_left (scaletargetref_name)\n 0*kube_horizontalpodautoscaler_info{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"}\n)\n",
@@ -1361,7 +1361,7 @@
},
{
"datasource": "$datasource",
- "description": "### Scaling metric (CPU): Desired replicas\nThis panel shows the scaling metric exposed by KEDA divided by the target/threshold used.\nIt should represent the desired number of replicas, ignoring the min/max constraints applied later.\n\n",
+ "description": "### Autoscaler failures rate\nThe rate of failures in the KEDA custom metrics API server. Whenever an error occurs, the KEDA custom\nmetrics server is unable to query the scaling metric from Prometheus so the autoscaler woudln't work properly.\n\n",
"fieldConfig": {
"defaults": {
"custom": {
@@ -1396,7 +1396,68 @@
"sort": "none"
}
},
- "span": 3,
+ "span": 6,
+ "targets": [
+ {
+ "expr": "sum by(cluster, namespace, scaler, metric, scaledObject) (\n label_replace(\n rate(keda_scaler_errors[$__rate_interval]),\n \"namespace\", \"$1\", \"exported_namespace\", \"(.+)\"\n )\n) +\non(cluster, namespace, metric, scaledObject) group_left\nlabel_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"} * 0,\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n ),\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n)\n",
+ "format": "time_series",
+ "legendFormat": "{{scaler}} failures",
+ "legendLink": null
+ }
+ ],
+ "title": "Autoscaler failures rate",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Ruler-querier - autoscaling",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "description": "### Scaling metric (CPU): Desired replicas\nThis panel shows the scaling metric exposed by KEDA divided by the target/threshold used.\nIt should represent the desired number of replicas, ignoring the min/max constraints applied later.\n\n",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 17,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 4,
"targets": [
{
"expr": "sum by (scaler) (\n label_replace(\n keda_scaler_metrics_value{cluster=~\"$cluster\", exported_namespace=~\"$namespace\", scaler=~\".*cpu.*\"},\n \"namespace\", \"$1\", \"exported_namespace\", \"(.*)\"\n )\n /\n on(cluster, namespace, scaledObject, metric) group_left label_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"},\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n ),\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n )\n)\n",
@@ -1434,7 +1495,7 @@
},
"overrides": [ ]
},
- "id": 17,
+ "id": 18,
"links": [ ],
"options": {
"legend": {
@@ -1445,7 +1506,7 @@
"sort": "none"
}
},
- "span": 3,
+ "span": 4,
"targets": [
{
"expr": "sum by (scaler) (\n label_replace(\n keda_scaler_metrics_value{cluster=~\"$cluster\", exported_namespace=~\"$namespace\", scaler=~\".*memory.*\"},\n \"namespace\", \"$1\", \"exported_namespace\", \"(.*)\"\n )\n /\n on(cluster, namespace, scaledObject, metric) group_left label_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"},\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n ),\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n )\n)\n",
@@ -1459,7 +1520,7 @@
},
{
"datasource": "$datasource",
- "description": "### Autoscaler failures rate\nThe rate of failures in the KEDA custom metrics API server. Whenever an error occurs, the KEDA custom\nmetrics server is unable to query the scaling metric from Prometheus so the autoscaler woudln't work properly.\n\n",
+ "description": "### Scaling metric (in-flight queries): Desired replicas\nThis panel shows the scaling metric exposed by KEDA divided by the target/threshold used.\nIt should represent the desired number of replicas, ignoring the min/max constraints applied later.\n\n",
"fieldConfig": {
"defaults": {
"custom": {
@@ -1483,7 +1544,7 @@
},
"overrides": [ ]
},
- "id": 18,
+ "id": 19,
"links": [ ],
"options": {
"legend": {
@@ -1494,16 +1555,16 @@
"sort": "none"
}
},
- "span": 3,
+ "span": 4,
"targets": [
{
- "expr": "sum by(cluster, namespace, scaler, metric, scaledObject) (\n label_replace(\n rate(keda_scaler_errors[$__rate_interval]),\n \"namespace\", \"$1\", \"exported_namespace\", \"(.+)\"\n )\n) +\non(cluster, namespace, metric, scaledObject) group_left\nlabel_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"} * 0,\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n ),\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n)\n",
+ "expr": "sum by (scaler) (\n label_replace(\n keda_scaler_metrics_value{cluster=~\"$cluster\", exported_namespace=~\"$namespace\", scaler=~\".*queries.*\"},\n \"namespace\", \"$1\", \"exported_namespace\", \"(.*)\"\n )\n /\n on(cluster, namespace, scaledObject, metric) group_left label_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"},\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n ),\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n )\n)\n",
"format": "time_series",
- "legendFormat": "{{scaler}} failures",
+ "legendFormat": "{{ scaler }}",
"legendLink": null
}
],
- "title": "Autoscaler failures rate",
+ "title": "Scaling metric (in-flight queries): Desired replicas",
"type": "timeseries"
}
],
@@ -1511,7 +1572,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Ruler-Querier - autoscaling",
+ "title": "",
"titleSize": "h6"
}
],
diff --git a/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-slow-queries.json b/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-slow-queries.json
index fa2c1451ff4..240205b6f93 100644
--- a/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-slow-queries.json
+++ b/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-slow-queries.json
@@ -364,7 +364,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Accross tenants",
+ "title": "Across tenants",
"titleSize": "h6"
},
{
diff --git a/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-top-tenants.json b/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-top-tenants.json
index fe9a3321e4d..1d50197d70a 100644
--- a/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-top-tenants.json
+++ b/operations/mimir-mixin-compiled-baremetal/dashboards/mimir-top-tenants.json
@@ -108,11 +108,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -234,11 +249,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -420,11 +450,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -606,11 +651,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -792,11 +852,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -918,11 +993,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -1044,11 +1134,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -1170,11 +1275,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -1301,6 +1421,21 @@
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
diff --git a/operations/mimir-mixin-compiled/alerts.yaml b/operations/mimir-mixin-compiled/alerts.yaml
index c3a5fd94c48..ef05886356b 100644
--- a/operations/mimir-mixin-compiled/alerts.yaml
+++ b/operations/mimir-mixin-compiled/alerts.yaml
@@ -36,18 +36,6 @@ groups:
for: 15m
labels:
severity: warning
- - alert: MimirQueriesIncorrect
- annotations:
- message: |
- The Mimir cluster {{ $labels.cluster }}/{{ $labels.namespace }} is experiencing {{ printf "%.2f" $value }}% incorrect query results.
- runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimirqueriesincorrect
- expr: |
- 100 * sum by (cluster, namespace) (rate(test_exporter_test_case_result_total{result="fail"}[5m]))
- /
- sum by (cluster, namespace) (rate(test_exporter_test_case_result_total[5m])) > 1
- for: 15m
- labels:
- severity: warning
- alert: MimirInconsistentRuntimeConfig
annotations:
message: |
@@ -951,6 +939,95 @@ groups:
for: 1h
labels:
severity: critical
+- name: mimir_ingest_storage_alerts
+ rules:
+ - alert: MimirIngesterLastConsumedOffsetCommitFailed
+ annotations:
+ message: Mimir {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} is failing to commit the last consumed offset.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterlastconsumedoffsetcommitfailed
+ expr: |
+ sum by(cluster, namespace, pod) (rate(cortex_ingest_storage_reader_offset_commit_failures_total[5m]))
+ /
+ sum by(cluster, namespace, pod) (rate(cortex_ingest_storage_reader_offset_commit_requests_total[5m]))
+ > 0.2
+ for: 15m
+ labels:
+ severity: critical
+ - alert: MimirIngesterFailedToReadRecordsFromKafka
+ annotations:
+ message: Mimir {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} is failing to read records from Kafka.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterfailedtoreadrecordsfromkafka
+ expr: |
+ sum by(cluster, namespace, pod, node_id) (rate(cortex_ingest_storage_reader_read_errors_total[1m]))
+ > 0
+ for: 5m
+ labels:
+ severity: critical
+ - alert: MimirIngesterKafkaFetchErrorsRateTooHigh
+ annotations:
+ message: Mimir {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} is receiving fetch errors when reading records from Kafka.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterkafkafetcherrorsratetoohigh
+ expr: |
+ sum by (cluster, namespace, pod) (rate (cortex_ingest_storage_reader_fetch_errors_total[5m]))
+ /
+ sum by (cluster, namespace, pod) (rate (cortex_ingest_storage_reader_fetches_total[5m]))
+ > 0.1
+ for: 15m
+ labels:
+ severity: critical
+ - alert: MimirStartingIngesterKafkaReceiveDelayIncreasing
+ annotations:
+ message: Mimir {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} in "starting" phase is not reducing consumption lag of write requests read
+ from Kafka.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimirstartingingesterkafkareceivedelayincreasing
+ expr: |
+ deriv((
+ sum by (cluster, namespace, pod) (rate(cortex_ingest_storage_reader_receive_delay_seconds_sum{phase="starting"}[1m]))
+ /
+ sum by (cluster, namespace, pod) (rate(cortex_ingest_storage_reader_receive_delay_seconds_count{phase="starting"}[1m]))
+ )[5m:1m]) > 0
+ for: 5m
+ labels:
+ severity: warning
+ - alert: MimirRunningIngesterReceiveDelayTooHigh
+ annotations:
+ message: Mimir {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} in "running" phase is too far behind in its consumption of write requests
+ from Kafka.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimirrunningingesterreceivedelaytoohigh
+ expr: |
+ (
+ sum by (cluster, namespace, pod) (rate(cortex_ingest_storage_reader_receive_delay_seconds_sum{phase="running"}[1m]))
+ /
+ sum by (cluster, namespace, pod) (rate(cortex_ingest_storage_reader_receive_delay_seconds_count{phase="running"}[1m]))
+ ) > (10 * 60)
+ for: 5m
+ labels:
+ severity: critical
+ - alert: MimirIngesterFailsToProcessRecordsFromKafka
+ annotations:
+ message: Mimir {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} fails to consume write requests read from Kafka due to internal errors.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterfailstoprocessrecordsfromkafka
+ expr: |
+ sum by (cluster, namespace, pod) (rate(cortex_ingest_storage_reader_records_failed_total{cause="server"}[1m])) > 0
+ for: 5m
+ labels:
+ severity: critical
+ - alert: MimirIngesterFailsEnforceStrongConsistencyOnReadPath
+ annotations:
+ message: Mimir {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace
+ }} fails to enforce strong-consistency on read-path.
+ runbook_url: https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#mimiringesterfailsenforcestrongconsistencyonreadpath
+ expr: |
+ sum by (cluster, namespace, pod) (rate(cortex_ingest_storage_strong_consistency_failures_total[1m])) > 0
+ for: 5m
+ labels:
+ severity: critical
- name: mimir_continuous_test
rules:
- alert: MimirContinuousTestNotRunningOnWrites
diff --git a/operations/mimir-mixin-compiled/dashboards/mimir-remote-ruler-reads-networking.json b/operations/mimir-mixin-compiled/dashboards/mimir-remote-ruler-reads-networking.json
new file mode 100644
index 00000000000..28c919fa5d0
--- /dev/null
+++ b/operations/mimir-mixin-compiled/dashboards/mimir-remote-ruler-reads-networking.json
@@ -0,0 +1,1048 @@
+{
+ "__requires": [
+ {
+ "id": "grafana",
+ "name": "Grafana",
+ "type": "grafana",
+ "version": "8.0.0"
+ }
+ ],
+ "annotations": {
+ "list": [ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 1,
+ "hideControls": false,
+ "links": [
+ {
+ "asDropdown": true,
+ "icon": "external link",
+ "includeVars": true,
+ "keepTime": true,
+ "tags": [
+ "mimir"
+ ],
+ "targetBlank": false,
+ "title": "Mimir dashboards",
+ "type": "dashboards"
+ }
+ ],
+ "refresh": "10s",
+ "rows": [
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 1,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_receive_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Receive bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 2,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_transmit_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Transmit bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 3,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"})",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"})",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ }
+ ],
+ "title": "Inflight requests (per pod)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 4,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"}))",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"}))",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ },
+ {
+ "expr": "min(cortex_tcp_connections_limit{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?(ruler-query-frontend|ruler-query-scheduler|ruler-querier).*\"})",
+ "format": "time_series",
+ "legendFormat": "limit",
+ "legendLink": null
+ }
+ ],
+ "title": "TCP connections (per pod)",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Summary",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 5,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_receive_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-frontend.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Receive bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 6,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_transmit_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-frontend.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Transmit bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 7,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-frontend.*\"})",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-frontend.*\"})",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ }
+ ],
+ "title": "Inflight requests (per pod)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 8,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-frontend.*\"}))",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-frontend.*\"}))",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ },
+ {
+ "expr": "min(cortex_tcp_connections_limit{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-frontend.*\"})",
+ "format": "time_series",
+ "legendFormat": "limit",
+ "legendLink": null
+ }
+ ],
+ "title": "TCP connections (per pod)",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Ruler-query-frontend",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 9,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_receive_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-scheduler.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Receive bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 10,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_transmit_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-scheduler.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Transmit bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 11,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-scheduler.*\"})",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-scheduler.*\"})",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ }
+ ],
+ "title": "Inflight requests (per pod)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 12,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-scheduler.*\"}))",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-scheduler.*\"}))",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ },
+ {
+ "expr": "min(cortex_tcp_connections_limit{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-query-scheduler.*\"})",
+ "format": "time_series",
+ "legendFormat": "limit",
+ "legendLink": null
+ }
+ ],
+ "title": "TCP connections (per pod)",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Ruler-query-scheduler",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 13,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_receive_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-querier.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Receive bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
+ "id": 14,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "sum by(pod) (rate(container_network_transmit_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-querier.*\"}[$__rate_interval]))",
+ "format": "time_series",
+ "legendFormat": "{{pod}}",
+ "legendLink": null
+ }
+ ],
+ "title": "Transmit bandwidth",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 15,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-querier.*\"})",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(cortex_inflight_requests{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-querier.*\"})",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ }
+ ],
+ "title": "Inflight requests (per pod)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "custom": {
+ "fillOpacity": 0
+ },
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 16,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 3,
+ "targets": [
+ {
+ "expr": "avg(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-querier.*\"}))",
+ "format": "time_series",
+ "legendFormat": "avg",
+ "legendLink": null
+ },
+ {
+ "expr": "max(sum by(pod) (cortex_tcp_connections{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-querier.*\"}))",
+ "format": "time_series",
+ "legendFormat": "highest",
+ "legendLink": null
+ },
+ {
+ "expr": "min(cortex_tcp_connections_limit{cluster=~\"$cluster\", namespace=~\"$namespace\",pod=~\"(.*mimir-)?ruler-querier.*\"})",
+ "format": "time_series",
+ "legendFormat": "limit",
+ "legendLink": null
+ }
+ ],
+ "title": "TCP connections (per pod)",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Ruler-querier",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "mimir"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "default",
+ "value": "default"
+ },
+ "hide": 0,
+ "label": "Data source",
+ "name": "datasource",
+ "options": [ ],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "allValue": ".*",
+ "current": {
+ "text": "prod",
+ "value": "prod"
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": true,
+ "label": "cluster",
+ "multi": false,
+ "name": "cluster",
+ "options": [ ],
+ "query": "label_values(cortex_build_info, cluster)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "prod",
+ "value": "prod"
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": "namespace",
+ "multi": false,
+ "name": "namespace",
+ "options": [ ],
+ "query": "label_values(cortex_build_info{cluster=~\"$cluster\"}, namespace)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "utc",
+ "title": "Mimir / Remote ruler reads networking",
+ "uid": "9e8cfff65f91632f8a25981c6fe44bc9",
+ "version": 0
+ }
\ No newline at end of file
diff --git a/operations/mimir-mixin-compiled/dashboards/mimir-remote-ruler-reads-resources.json b/operations/mimir-mixin-compiled/dashboards/mimir-remote-ruler-reads-resources.json
index f48ce745e47..c079baa785d 100644
--- a/operations/mimir-mixin-compiled/dashboards/mimir-remote-ruler-reads-resources.json
+++ b/operations/mimir-mixin-compiled/dashboards/mimir-remote-ruler-reads-resources.json
@@ -309,7 +309,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-frontend (dedicated to ruler)",
+ "title": "Ruler-query-frontend",
"titleSize": "h6"
},
{
@@ -591,7 +591,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-scheduler (dedicated to ruler)",
+ "title": "Ruler-query-scheduler",
"titleSize": "h6"
},
{
@@ -873,7 +873,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Querier (dedicated to ruler)",
+ "title": "Ruler-querier",
"titleSize": "h6"
}
],
diff --git a/operations/mimir-mixin-compiled/dashboards/mimir-remote-ruler-reads.json b/operations/mimir-mixin-compiled/dashboards/mimir-remote-ruler-reads.json
index 07dd84baf75..3b509ef8a48 100644
--- a/operations/mimir-mixin-compiled/dashboards/mimir-remote-ruler-reads.json
+++ b/operations/mimir-mixin-compiled/dashboards/mimir-remote-ruler-reads.json
@@ -445,7 +445,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-frontend (dedicated to ruler)",
+ "title": "Ruler-query-frontend",
"titleSize": "h6"
},
{
@@ -773,7 +773,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-scheduler (dedicated to ruler)",
+ "title": "Ruler-query-scheduler",
"titleSize": "h6"
},
{
@@ -935,7 +935,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Query-scheduler Latency (Time in Queue) Breakout by Additional Queue Dimensions",
+ "title": "Ruler-query-scheduler Latency (Time in Queue) Breakout by Additional Queue Dimensions",
"titleSize": "h6"
},
{
@@ -1242,7 +1242,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Querier (dedicated to ruler)",
+ "title": "Ruler-querier",
"titleSize": "h6"
},
{
@@ -1335,7 +1335,7 @@
"sort": "none"
}
},
- "span": 3,
+ "span": 6,
"targets": [
{
"expr": "max by (scaletargetref_name) (\n kube_horizontalpodautoscaler_spec_max_replicas{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"}\n # Add the scaletargetref_name label for readability\n + on (cluster, namespace, horizontalpodautoscaler) group_left (scaletargetref_name)\n 0*kube_horizontalpodautoscaler_info{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"}\n)\n",
@@ -1361,7 +1361,7 @@
},
{
"datasource": "$datasource",
- "description": "### Scaling metric (CPU): Desired replicas\nThis panel shows the scaling metric exposed by KEDA divided by the target/threshold used.\nIt should represent the desired number of replicas, ignoring the min/max constraints applied later.\n\n",
+          "description": "### Autoscaler failures rate\nThe rate of failures in the KEDA custom metrics API server. Whenever an error occurs, the KEDA custom\nmetrics server is unable to query the scaling metric from Prometheus so the autoscaler wouldn't work properly.\n\n",
"fieldConfig": {
"defaults": {
"custom": {
@@ -1396,7 +1396,68 @@
"sort": "none"
}
},
- "span": 3,
+ "span": 6,
+ "targets": [
+ {
+ "expr": "sum by(cluster, namespace, scaler, metric, scaledObject) (\n label_replace(\n rate(keda_scaler_errors[$__rate_interval]),\n \"namespace\", \"$1\", \"exported_namespace\", \"(.+)\"\n )\n) +\non(cluster, namespace, metric, scaledObject) group_left\nlabel_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"} * 0,\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n ),\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n)\n",
+ "format": "time_series",
+ "legendFormat": "{{scaler}} failures",
+ "legendLink": null
+ }
+ ],
+ "title": "Autoscaler failures rate",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Ruler-querier - autoscaling",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "datasource": "$datasource",
+ "description": "### Scaling metric (CPU): Desired replicas\nThis panel shows the scaling metric exposed by KEDA divided by the target/threshold used.\nIt should represent the desired number of replicas, ignoring the min/max constraints applied later.\n\n",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 1,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "id": 17,
+ "links": [ ],
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "span": 4,
"targets": [
{
"expr": "sum by (scaler) (\n label_replace(\n keda_scaler_metrics_value{cluster=~\"$cluster\", exported_namespace=~\"$namespace\", scaler=~\".*cpu.*\"},\n \"namespace\", \"$1\", \"exported_namespace\", \"(.*)\"\n )\n /\n on(cluster, namespace, scaledObject, metric) group_left label_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"},\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n ),\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n )\n)\n",
@@ -1434,7 +1495,7 @@
},
"overrides": [ ]
},
- "id": 17,
+ "id": 18,
"links": [ ],
"options": {
"legend": {
@@ -1445,7 +1506,7 @@
"sort": "none"
}
},
- "span": 3,
+ "span": 4,
"targets": [
{
"expr": "sum by (scaler) (\n label_replace(\n keda_scaler_metrics_value{cluster=~\"$cluster\", exported_namespace=~\"$namespace\", scaler=~\".*memory.*\"},\n \"namespace\", \"$1\", \"exported_namespace\", \"(.*)\"\n )\n /\n on(cluster, namespace, scaledObject, metric) group_left label_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"},\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n ),\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n )\n)\n",
@@ -1459,7 +1520,7 @@
},
{
"datasource": "$datasource",
- "description": "### Autoscaler failures rate\nThe rate of failures in the KEDA custom metrics API server. Whenever an error occurs, the KEDA custom\nmetrics server is unable to query the scaling metric from Prometheus so the autoscaler woudln't work properly.\n\n",
+ "description": "### Scaling metric (in-flight queries): Desired replicas\nThis panel shows the scaling metric exposed by KEDA divided by the target/threshold used.\nIt should represent the desired number of replicas, ignoring the min/max constraints applied later.\n\n",
"fieldConfig": {
"defaults": {
"custom": {
@@ -1483,7 +1544,7 @@
},
"overrides": [ ]
},
- "id": 18,
+ "id": 19,
"links": [ ],
"options": {
"legend": {
@@ -1494,16 +1555,16 @@
"sort": "none"
}
},
- "span": 3,
+ "span": 4,
"targets": [
{
- "expr": "sum by(cluster, namespace, scaler, metric, scaledObject) (\n label_replace(\n rate(keda_scaler_errors[$__rate_interval]),\n \"namespace\", \"$1\", \"exported_namespace\", \"(.+)\"\n )\n) +\non(cluster, namespace, metric, scaledObject) group_left\nlabel_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"} * 0,\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n ),\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n)\n",
+ "expr": "sum by (scaler) (\n label_replace(\n keda_scaler_metrics_value{cluster=~\"$cluster\", exported_namespace=~\"$namespace\", scaler=~\".*queries.*\"},\n \"namespace\", \"$1\", \"exported_namespace\", \"(.*)\"\n )\n /\n on(cluster, namespace, scaledObject, metric) group_left label_replace(\n label_replace(\n kube_horizontalpodautoscaler_spec_target_metric{cluster=~\"$cluster\", namespace=~\"$namespace\", horizontalpodautoscaler=~\"keda-hpa-ruler-querier\"},\n \"metric\", \"$1\", \"metric_name\", \"(.+)\"\n ),\n \"scaledObject\", \"$1\", \"horizontalpodautoscaler\", \"keda-hpa-(.*)\"\n )\n)\n",
"format": "time_series",
- "legendFormat": "{{scaler}} failures",
+ "legendFormat": "{{ scaler }}",
"legendLink": null
}
],
- "title": "Autoscaler failures rate",
+ "title": "Scaling metric (in-flight queries): Desired replicas",
"type": "timeseries"
}
],
@@ -1511,7 +1572,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Ruler-Querier - autoscaling",
+ "title": "",
"titleSize": "h6"
}
],
diff --git a/operations/mimir-mixin-compiled/dashboards/mimir-slow-queries.json b/operations/mimir-mixin-compiled/dashboards/mimir-slow-queries.json
index fa2c1451ff4..240205b6f93 100644
--- a/operations/mimir-mixin-compiled/dashboards/mimir-slow-queries.json
+++ b/operations/mimir-mixin-compiled/dashboards/mimir-slow-queries.json
@@ -364,7 +364,7 @@
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
- "title": "Accross tenants",
+ "title": "Across tenants",
"titleSize": "h6"
},
{
diff --git a/operations/mimir-mixin-compiled/dashboards/mimir-top-tenants.json b/operations/mimir-mixin-compiled/dashboards/mimir-top-tenants.json
index fe9a3321e4d..1d50197d70a 100644
--- a/operations/mimir-mixin-compiled/dashboards/mimir-top-tenants.json
+++ b/operations/mimir-mixin-compiled/dashboards/mimir-top-tenants.json
@@ -108,11 +108,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -234,11 +249,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -420,11 +450,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -606,11 +651,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -792,11 +852,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -918,11 +993,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -1044,11 +1134,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -1170,11 +1275,26 @@
"linkTargetBlank": false,
"linkTooltip": "Drill down",
"linkUrl": "",
- "pattern": "Value #A",
+ "pattern": "Value",
"thresholds": [ ],
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
@@ -1301,6 +1421,21 @@
"type": "number",
"unit": "short"
},
+ {
+ "alias": "user",
+ "colorMode": null,
+ "colors": [ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTargetBlank": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "user",
+ "thresholds": [ ],
+ "type": "number",
+ "unit": "string"
+ },
{
"alias": "",
"colorMode": null,
diff --git a/operations/mimir-mixin-tools/screenshots/app.js b/operations/mimir-mixin-tools/screenshots/app.js
index 8d72d4aa011..88a7f8c6228 100644
--- a/operations/mimir-mixin-tools/screenshots/app.js
+++ b/operations/mimir-mixin-tools/screenshots/app.js
@@ -27,6 +27,7 @@ const customViewportHeight = {
'mimir-writes-resources': 1600,
'mimir-remote-ruler-reads': 1800,
'mimir-remote-ruler-reads-resources': 1100,
+ 'mimir-remote-ruler-reads-networking': 1400,
};
// Dashboards for which we're not generating the screenshots because their content
diff --git a/operations/mimir-mixin-tools/serve/provisioning-datasources.yaml b/operations/mimir-mixin-tools/serve/provisioning-datasources.yaml
index 6829510728e..93814e880c1 100644
--- a/operations/mimir-mixin-tools/serve/provisioning-datasources.yaml
+++ b/operations/mimir-mixin-tools/serve/provisioning-datasources.yaml
@@ -13,6 +13,13 @@ datasources:
basicAuthPassword: $DATASOURCE_PASSWORD
version: 1
editable: true
+ - name: Mimir (ingest-storage local dev env)
+ type: prometheus
+ access: proxy
+ orgId: 1
+ url: http://nginx:8080/prometheus
+ version: 1
+ editable: true
- name: Loki
type: loki
access: proxy
diff --git a/operations/mimir-mixin/alerts.libsonnet b/operations/mimir-mixin/alerts.libsonnet
index 10f936e8d8b..03fe7d4cdff 100644
--- a/operations/mimir-mixin/alerts.libsonnet
+++ b/operations/mimir-mixin/alerts.libsonnet
@@ -6,5 +6,6 @@
(import 'alerts/blocks.libsonnet') +
(import 'alerts/compactor.libsonnet') +
(import 'alerts/autoscaling.libsonnet') +
+ (import 'alerts/ingest-storage.libsonnet') +
(import 'alerts/continuous-test.libsonnet'),
}
diff --git a/operations/mimir-mixin/alerts/alerts.libsonnet b/operations/mimir-mixin/alerts/alerts.libsonnet
index ca67c94b897..e0fceef2586 100644
--- a/operations/mimir-mixin/alerts/alerts.libsonnet
+++ b/operations/mimir-mixin/alerts/alerts.libsonnet
@@ -79,26 +79,6 @@ local utils = import 'mixin-utils/utils.libsonnet';
||| % $._config,
},
},
- {
- alert: $.alertName('QueriesIncorrect'),
- expr: |||
- 100 * sum by (%(group_by)s) (rate(test_exporter_test_case_result_total{result="fail"}[%(range_interval)s]))
- /
- sum by (%(group_by)s) (rate(test_exporter_test_case_result_total[%(range_interval)s])) > 1
- ||| % {
- group_by: $._config.alert_aggregation_labels,
- range_interval: $.alertRangeInterval(5),
- },
- 'for': '15m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- message: |||
- The %(product)s cluster %(alert_aggregation_variables)s is experiencing {{ printf "%%.2f" $value }}%% incorrect query results.
- ||| % $._config,
- },
- },
{
alert: $.alertName('InconsistentRuntimeConfig'),
expr: |||
diff --git a/operations/mimir-mixin/alerts/blocks.libsonnet b/operations/mimir-mixin/alerts/blocks.libsonnet
index 8f6ed51d2f8..28c3073e140 100644
--- a/operations/mimir-mixin/alerts/blocks.libsonnet
+++ b/operations/mimir-mixin/alerts/blocks.libsonnet
@@ -14,17 +14,18 @@
(max by(%(alert_aggregation_labels)s, %(per_instance_label)s) (cortex_ingester_shipper_last_successful_upload_timestamp_seconds) > 0)
and
# Only if the ingester has ingested samples over the last 4h.
- (max by(%(alert_aggregation_labels)s, %(per_instance_label)s) (max_over_time(%(alert_aggregation_rule_prefix)s_%(per_instance_label)s:cortex_ingester_ingested_samples_total:rate1m[4h])) > 0)
+ (max by(%(alert_aggregation_labels)s, %(per_instance_label)s) (max_over_time(%(alert_aggregation_rule_prefix)s_%(per_instance_label)s:cortex_ingester_ingested_samples_total:rate%(recording_rules_range_interval)s[4h])) > 0)
and
# Only if the ingester was ingesting samples 4h ago. This protects against the case where the ingester replica
# had ingested samples in the past, then no traffic was received for a long period and then it starts
# receiving samples again. Without this check, the alert would fire as soon as it gets back receiving
# samples, while the a block shipping is expected within the next 4h.
- (max by(%(alert_aggregation_labels)s, %(per_instance_label)s) (max_over_time(%(alert_aggregation_rule_prefix)s_%(per_instance_label)s:cortex_ingester_ingested_samples_total:rate1m[1h] offset 4h)) > 0)
+ (max by(%(alert_aggregation_labels)s, %(per_instance_label)s) (max_over_time(%(alert_aggregation_rule_prefix)s_%(per_instance_label)s:cortex_ingester_ingested_samples_total:rate%(recording_rules_range_interval)s[1h] offset 4h)) > 0)
||| % {
alert_aggregation_labels: $._config.alert_aggregation_labels,
per_instance_label: $._config.per_instance_label,
alert_aggregation_rule_prefix: $._config.alert_aggregation_rule_prefix,
+ recording_rules_range_interval: $._config.recording_rules_range_interval,
},
labels: {
severity: 'critical',
@@ -41,11 +42,12 @@
expr: |||
(max by(%(alert_aggregation_labels)s, %(per_instance_label)s) (cortex_ingester_shipper_last_successful_upload_timestamp_seconds) == 0)
and
- (max by(%(alert_aggregation_labels)s, %(per_instance_label)s) (max_over_time(%(alert_aggregation_rule_prefix)s_%(per_instance_label)s:cortex_ingester_ingested_samples_total:rate1m[4h])) > 0)
+ (max by(%(alert_aggregation_labels)s, %(per_instance_label)s) (max_over_time(%(alert_aggregation_rule_prefix)s_%(per_instance_label)s:cortex_ingester_ingested_samples_total:rate%(recording_rules_range_interval)s[4h])) > 0)
||| % {
alert_aggregation_labels: $._config.alert_aggregation_labels,
per_instance_label: $._config.per_instance_label,
alert_aggregation_rule_prefix: $._config.alert_aggregation_rule_prefix,
+ recording_rules_range_interval: $._config.recording_rules_range_interval,
},
labels: {
severity: 'critical',
diff --git a/operations/mimir-mixin/alerts/ingest-storage.libsonnet b/operations/mimir-mixin/alerts/ingest-storage.libsonnet
new file mode 100644
index 00000000000..81944eb040e
--- /dev/null
+++ b/operations/mimir-mixin/alerts/ingest-storage.libsonnet
@@ -0,0 +1,131 @@
+(import 'alerts-utils.libsonnet') {
+ local alertGroups = [
+ {
+ name: 'mimir_ingest_storage_alerts',
+ rules: [
+ {
+ alert: $.alertName('IngesterLastConsumedOffsetCommitFailed'),
+ 'for': '15m',
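+          // Fires when more than 20% of the offset commit requests issued by an ingester are failing.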
+ expr: |||
+ sum by(%(alert_aggregation_labels)s, %(per_instance_label)s) (rate(cortex_ingest_storage_reader_offset_commit_failures_total[5m]))
+ /
+ sum by(%(alert_aggregation_labels)s, %(per_instance_label)s) (rate(cortex_ingest_storage_reader_offset_commit_requests_total[5m]))
+ > 0.2
+ ||| % $._config,
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ message: '%(product)s {{ $labels.%(per_instance_label)s }} in %(alert_aggregation_variables)s is failing to commit the last consumed offset.' % $._config,
+ },
+ },
+
+ {
+ alert: $.alertName('IngesterFailedToReadRecordsFromKafka'),
+ 'for': '5m',
+
+          // The metric used by this alert is reported by the Kafka client on read errors from the connection to Kafka.
+          // We use node_id so that we only alert when problems with the same Kafka node keep repeating.
+          // If the errors are spread across different nodes (e.g. during a rollout), that's expected and we don't need to trigger the alert.
+ expr: |||
+ sum by(%(alert_aggregation_labels)s, %(per_instance_label)s, node_id) (rate(cortex_ingest_storage_reader_read_errors_total[1m]))
+ > 0
+ ||| % $._config,
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ message: '%(product)s {{ $labels.%(per_instance_label)s }} in %(alert_aggregation_variables)s is failing to read records from Kafka.' % $._config,
+ },
+ },
+
+ {
+ alert: $.alertName('IngesterKafkaFetchErrorsRateTooHigh'),
+ 'for': '15m',
+ // See https://github.com/grafana/mimir/blob/24591ae56cd7d6ef24a7cc1541a41405676773f4/vendor/github.com/twmb/franz-go/pkg/kgo/record_and_fetch.go#L332-L366 for errors that can be reported here.
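+          // Fires when more than 10% of the Kafka fetch requests issued by an ingester result in an error.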
+ expr: |||
+ sum by (%(alert_aggregation_labels)s, %(per_instance_label)s) (rate (cortex_ingest_storage_reader_fetch_errors_total[5m]))
+ /
+ sum by (%(alert_aggregation_labels)s, %(per_instance_label)s) (rate (cortex_ingest_storage_reader_fetches_total[5m]))
+ > 0.1
+ ||| % $._config,
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ message: '%(product)s {{ $labels.%(per_instance_label)s }} in %(alert_aggregation_variables)s is receiving fetch errors when reading records from Kafka.' % $._config,
+ },
+ },
+
+        // This is an experiment. We compute the derivative (i.e. the rate of change of the consumption lag) over 5 minutes. If the derivative is above 0, it means the consumption lag is increasing instead of decreasing.
+ {
+ alert: $.alertName('StartingIngesterKafkaReceiveDelayIncreasing'),
+ 'for': '5m',
+          // We're using series from the classic histogram here, because mixtool lint doesn't support the histogram_sum and histogram_count functions yet.
+ expr: |||
+ deriv((
+ sum by (%(alert_aggregation_labels)s, %(per_instance_label)s) (rate(cortex_ingest_storage_reader_receive_delay_seconds_sum{phase="starting"}[1m]))
+ /
+ sum by (%(alert_aggregation_labels)s, %(per_instance_label)s) (rate(cortex_ingest_storage_reader_receive_delay_seconds_count{phase="starting"}[1m]))
+ )[5m:1m]) > 0
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ message: '%(product)s {{ $labels.%(per_instance_label)s }} in %(alert_aggregation_variables)s in "starting" phase is not reducing consumption lag of write requests read from Kafka.' % $._config,
+ },
+ },
+
+ {
+ alert: $.alertName('RunningIngesterReceiveDelayTooHigh'),
+ 'for': '5m',
+          // We're using series from the classic histogram here, because mixtool lint doesn't support the histogram_sum and histogram_count functions yet.
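+          // The alert fires when the average receive delay exceeds 10 minutes (10 * 60 seconds).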
+ expr: |||
+ (
+ sum by (%(alert_aggregation_labels)s, %(per_instance_label)s) (rate(cortex_ingest_storage_reader_receive_delay_seconds_sum{phase="running"}[1m]))
+ /
+ sum by (%(alert_aggregation_labels)s, %(per_instance_label)s) (rate(cortex_ingest_storage_reader_receive_delay_seconds_count{phase="running"}[1m]))
+ ) > (10 * 60)
+ ||| % $._config,
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ message: '%(product)s {{ $labels.%(per_instance_label)s }} in %(alert_aggregation_variables)s in "running" phase is too far behind in its consumption of write requests from Kafka.' % $._config,
+ },
+ },
+
+ {
+ alert: $.alertName('IngesterFailsToProcessRecordsFromKafka'),
+ 'for': '5m',
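+          // Only failures with cause="server" are counted, since those indicate internal errors while processing records.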
+ expr: |||
+ sum by (%(alert_aggregation_labels)s, %(per_instance_label)s) (rate(cortex_ingest_storage_reader_records_failed_total{cause="server"}[1m])) > 0
+ ||| % $._config,
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ message: '%(product)s {{ $labels.%(per_instance_label)s }} in %(alert_aggregation_variables)s fails to consume write requests read from Kafka due to internal errors.' % $._config,
+ },
+ },
+
+ {
+ alert: $.alertName('IngesterFailsEnforceStrongConsistencyOnReadPath'),
+ 'for': '5m',
+ expr: |||
+ sum by (%(alert_aggregation_labels)s, %(per_instance_label)s) (rate(cortex_ingest_storage_strong_consistency_failures_total[1m])) > 0
+ ||| % $._config,
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ message: '%(product)s {{ $labels.%(per_instance_label)s }} in %(alert_aggregation_variables)s fails to enforce strong-consistency on read-path.' % $._config,
+ },
+ },
+ ],
+ },
+ ],
+
+ groups+: $.withRunbookURL('https://grafana.com/docs/mimir/latest/operators-guide/mimir-runbooks/#%s', $.withExtraLabelsAnnotations(alertGroups)),
+}
diff --git a/operations/mimir-mixin/config.libsonnet b/operations/mimir-mixin/config.libsonnet
index d40c3a233e9..21d86626932 100644
--- a/operations/mimir-mixin/config.libsonnet
+++ b/operations/mimir-mixin/config.libsonnet
@@ -57,11 +57,13 @@
},
// Some dashboards show panels grouping together multiple components of a given "path".
- // This mapping configures which components belong to each group.
+ // This mapping configures which components belong to each group. A component can belong
+ // to multiple groups.
local componentGroups = {
write: ['distributor', 'ingester', 'mimir_write'],
read: ['query_frontend', 'querier', 'ruler_query_frontend', 'ruler_querier', 'mimir_read'],
backend: ['query_scheduler', 'ruler_query_scheduler', 'ruler', 'store_gateway', 'compactor', 'alertmanager', 'overrides_exporter', 'mimir_backend'],
+ remote_ruler_read: ['ruler_query_frontend', 'ruler_query_scheduler', 'ruler_querier'],
},
// These are used by the dashboards and allow for the simultaneous display of
@@ -133,6 +135,7 @@
write: componentsGroupMatcher(componentGroups.write),
read: componentsGroupMatcher(componentGroups.read),
backend: componentsGroupMatcher(componentGroups.backend),
+ remote_ruler_read: componentsGroupMatcher(componentGroups.remote_ruler_read),
},
all_instances: std.join('|', std.map(function(name) componentNameRegexp[name], componentGroups.write + componentGroups.read + componentGroups.backend)),
diff --git a/operations/mimir-mixin/dashboards.libsonnet b/operations/mimir-mixin/dashboards.libsonnet
index c4e9d6f9e9a..a9fb02659ae 100644
--- a/operations/mimir-mixin/dashboards.libsonnet
+++ b/operations/mimir-mixin/dashboards.libsonnet
@@ -23,6 +23,7 @@
(import 'dashboards/overview-networking.libsonnet') +
(import 'dashboards/reads-resources.libsonnet') +
(import 'dashboards/remote-ruler-reads-resources.libsonnet') +
+ (import 'dashboards/remote-ruler-reads-networking.libsonnet') +
(import 'dashboards/reads-networking.libsonnet') +
(import 'dashboards/writes-resources.libsonnet') +
(import 'dashboards/writes-networking.libsonnet') +
diff --git a/operations/mimir-mixin/dashboards/dashboard-utils.libsonnet b/operations/mimir-mixin/dashboards/dashboard-utils.libsonnet
index 47a347cb1a2..90e76b5c19c 100644
--- a/operations/mimir-mixin/dashboards/dashboard-utils.libsonnet
+++ b/operations/mimir-mixin/dashboards/dashboard-utils.libsonnet
@@ -596,179 +596,159 @@ local utils = import 'mixin-utils/utils.libsonnet';
$.latencyPanel('cortex_kv_request_duration_seconds', '{%s, kv_name=~"%s"}' % [$.jobMatcher($._config.job_names[jobName]), kvName])
),
- cpuAndMemoryBasedAutoScalingRow(componentTitle)::
- local component = std.asciiLower(componentTitle);
- local field = std.strReplace(component, '-', '_');
- super.row('%s - autoscaling' % [componentTitle])
- .addPanel(
- local title = 'Replicas';
- $.timeseriesPanel(title) +
- $.queryPanel(
- [
- |||
- max by (scaletargetref_name) (
- kube_horizontalpodautoscaler_spec_max_replicas{%(namespace_matcher)s, horizontalpodautoscaler=~"%(hpa_name)s"}
- # Add the scaletargetref_name label for readability
- + on (%(cluster_labels)s, horizontalpodautoscaler) group_left (scaletargetref_name)
- 0*kube_horizontalpodautoscaler_info{%(namespace_matcher)s, horizontalpodautoscaler=~"%(hpa_name)s"}
- )
- ||| % {
- namespace_matcher: $.namespaceMatcher(),
- hpa_name: $._config.autoscaling[field].hpa_name,
- cluster_labels: std.join(', ', $._config.cluster_labels),
- },
- |||
- max by (scaletargetref_name) (
- kube_horizontalpodautoscaler_status_current_replicas{%(namespace_matcher)s, horizontalpodautoscaler=~"%(hpa_name)s"}
- # HPA doesn't go to 0 replicas, so we multiply by 0 if the HPA is not active
- * on (%(cluster_labels)s, horizontalpodautoscaler)
- kube_horizontalpodautoscaler_status_condition{%(namespace_matcher)s, horizontalpodautoscaler=~"%(hpa_name)s", condition="ScalingActive", status="true"}
- # Add the scaletargetref_name label for readability
- + on (%(cluster_labels)s, horizontalpodautoscaler) group_left (scaletargetref_name)
- 0*kube_horizontalpodautoscaler_info{%(namespace_matcher)s, horizontalpodautoscaler=~"%(hpa_name)s"}
- )
- ||| % {
- namespace_matcher: $.namespaceMatcher(),
- hpa_name: $._config.autoscaling[field].hpa_name,
- cluster_labels: std.join(', ', $._config.cluster_labels),
- },
- |||
- max by (scaletargetref_name) (
- kube_horizontalpodautoscaler_spec_min_replicas{%(namespace_matcher)s, horizontalpodautoscaler=~"%(hpa_name)s"}
- # Add the scaletargetref_name label for readability
- + on (%(cluster_labels)s, horizontalpodautoscaler) group_left (scaletargetref_name)
- 0*kube_horizontalpodautoscaler_info{%(namespace_matcher)s, horizontalpodautoscaler=~"%(hpa_name)s"}
- )
- ||| % {
- namespace_matcher: $.namespaceMatcher(),
- hpa_name: $._config.autoscaling[field].hpa_name,
- cluster_labels: std.join(', ', $._config.cluster_labels),
- },
- ],
- [
- 'Max {{ scaletargetref_name }}',
- 'Current {{ scaletargetref_name }}',
- 'Min {{ scaletargetref_name }}',
- ],
- ) +
- $.panelDescription(
- title,
+ // The provided componentName should be the name of a component among the ones defined in $._config.autoscaling.
+ autoScalingActualReplicas(componentName)::
+ local title = 'Replicas';
+ local componentTitle = std.strReplace(componentName, '_', '-');
+
+ $.timeseriesPanel(title) +
+ $.queryPanel(
+ [
|||
- The maximum and current number of %s replicas.
- Note: The current number of replicas can still show 1 replica even when scaled to 0.
- Because HPA never reports 0 replicas, the query will report 0 only if the HPA is not active.
- ||| % [component]
- ) +
- {
- fieldConfig+: {
- overrides: [
- $.overrideField('byRegexp', '/Max .+/', [
- $.overrideProperty('custom.fillOpacity', 0),
- $.overrideProperty('custom.lineStyle', { fill: 'dash' }),
- ]),
- $.overrideField('byRegexp', '/Current .+/', [
- $.overrideProperty('custom.fillOpacity', 0),
- ]),
- $.overrideField('byRegexp', '/Min .+/', [
- $.overrideProperty('custom.fillOpacity', 0),
- $.overrideProperty('custom.lineStyle', { fill: 'dash' }),
- ]),
- ],
+ max by (scaletargetref_name) (
+ kube_horizontalpodautoscaler_spec_max_replicas{%(namespace_matcher)s, horizontalpodautoscaler=~"%(hpa_name)s"}
+ # Add the scaletargetref_name label for readability
+ + on (%(cluster_labels)s, horizontalpodautoscaler) group_left (scaletargetref_name)
+ 0*kube_horizontalpodautoscaler_info{%(namespace_matcher)s, horizontalpodautoscaler=~"%(hpa_name)s"}
+ )
+ ||| % {
+ namespace_matcher: $.namespaceMatcher(),
+ hpa_name: $._config.autoscaling[componentName].hpa_name,
+ cluster_labels: std.join(', ', $._config.cluster_labels),
+ },
+ |||
+ max by (scaletargetref_name) (
+ kube_horizontalpodautoscaler_status_current_replicas{%(namespace_matcher)s, horizontalpodautoscaler=~"%(hpa_name)s"}
+ # HPA doesn't go to 0 replicas, so we multiply by 0 if the HPA is not active
+ * on (%(cluster_labels)s, horizontalpodautoscaler)
+ kube_horizontalpodautoscaler_status_condition{%(namespace_matcher)s, horizontalpodautoscaler=~"%(hpa_name)s", condition="ScalingActive", status="true"}
+ # Add the scaletargetref_name label for readability
+ + on (%(cluster_labels)s, horizontalpodautoscaler) group_left (scaletargetref_name)
+ 0*kube_horizontalpodautoscaler_info{%(namespace_matcher)s, horizontalpodautoscaler=~"%(hpa_name)s"}
+ )
+ ||| % {
+ namespace_matcher: $.namespaceMatcher(),
+ hpa_name: $._config.autoscaling[componentName].hpa_name,
+ cluster_labels: std.join(', ', $._config.cluster_labels),
+ },
+ |||
+ max by (scaletargetref_name) (
+ kube_horizontalpodautoscaler_spec_min_replicas{%(namespace_matcher)s, horizontalpodautoscaler=~"%(hpa_name)s"}
+ # Add the scaletargetref_name label for readability
+ + on (%(cluster_labels)s, horizontalpodautoscaler) group_left (scaletargetref_name)
+ 0*kube_horizontalpodautoscaler_info{%(namespace_matcher)s, horizontalpodautoscaler=~"%(hpa_name)s"}
+ )
+ ||| % {
+ namespace_matcher: $.namespaceMatcher(),
+ hpa_name: $._config.autoscaling[componentName].hpa_name,
+ cluster_labels: std.join(', ', $._config.cluster_labels),
},
+ ],
+ [
+ 'Max {{ scaletargetref_name }}',
+ 'Current {{ scaletargetref_name }}',
+ 'Min {{ scaletargetref_name }}',
+ ],
+ ) +
+ $.panelDescription(
+ title,
+ |||
+        The minimum, maximum, and current number of %s replicas.
+        Note: the HPA never reports 0 replicas, so the raw metric can show 1 replica even when the component is scaled to 0.
+        To compensate, the current replicas query reports 0 when the HPA is not active.
+ ||| % [componentTitle]
+ ) +
+ {
+ fieldConfig+: {
+ overrides: [
+ $.overrideField('byRegexp', '/Max .+/', [
+ $.overrideProperty('custom.fillOpacity', 0),
+ $.overrideProperty('custom.lineStyle', { fill: 'dash' }),
+ ]),
+ $.overrideField('byRegexp', '/Current .+/', [
+ $.overrideProperty('custom.fillOpacity', 0),
+ ]),
+ $.overrideField('byRegexp', '/Min .+/', [
+ $.overrideProperty('custom.fillOpacity', 0),
+ $.overrideProperty('custom.lineStyle', { fill: 'dash' }),
+ ]),
+ ],
},
- )
- .addPanel(
- local title = 'Scaling metric (CPU): Desired replicas';
- $.timeseriesPanel(title) +
- $.queryPanel(
- [
- |||
- sum by (scaler) (
+ },
+
+ // The provided componentName should be the name of a component among the ones defined in $._config.autoscaling.
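+  // Example usage: $.autoScalingDesiredReplicasByScalingMetricPanel('ruler_querier', 'CPU', 'cpu').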
+ autoScalingDesiredReplicasByScalingMetricPanel(componentName, scalingMetricName, scalingMetricID)::
+ local title = 'Scaling metric (%s): Desired replicas' % scalingMetricName;
+
+ $.timeseriesPanel(title) +
+ $.queryPanel(
+ [
+ |||
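+          # Desired replicas: the KEDA-exposed scaling metric value divided by the HPA target for that metric.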
+ sum by (scaler) (
+ label_replace(
+ keda_scaler_metrics_value{%(cluster_label)s=~"$cluster", exported_namespace=~"$namespace", scaler=~".*%(scaling_metric_id)s.*"},
+ "namespace", "$1", "exported_namespace", "(.*)"
+ )
+ /
+ on(%(aggregation_labels)s, scaledObject, metric) group_left label_replace(
label_replace(
- keda_scaler_metrics_value{%(cluster_label)s=~"$cluster", exported_namespace=~"$namespace", scaler=~".*cpu.*"},
- "namespace", "$1", "exported_namespace", "(.*)"
- )
- /
- on(%(aggregation_labels)s, scaledObject, metric) group_left label_replace(
- label_replace(
- kube_horizontalpodautoscaler_spec_target_metric{%(namespace)s, horizontalpodautoscaler=~"%(hpa_name)s"},
- "metric", "$1", "metric_name", "(.+)"
- ),
- "scaledObject", "$1", "horizontalpodautoscaler", "%(hpa_prefix)s(.*)"
- )
+ kube_horizontalpodautoscaler_spec_target_metric{%(namespace)s, horizontalpodautoscaler=~"%(hpa_name)s"},
+ "metric", "$1", "metric_name", "(.+)"
+ ),
+ "scaledObject", "$1", "horizontalpodautoscaler", "%(hpa_prefix)s(.*)"
)
- ||| % {
- aggregation_labels: $._config.alert_aggregation_labels,
- cluster_label: $._config.per_cluster_label,
- hpa_prefix: $._config.autoscaling_hpa_prefix,
- hpa_name: $._config.autoscaling[field].hpa_name,
- namespace: $.namespaceMatcher(),
- },
- ], [
- '{{ scaler }}',
- ]
- ) +
- $.panelDescription(
- title,
- |||
- This panel shows the scaling metric exposed by KEDA divided by the target/threshold used.
- It should represent the desired number of replicas, ignoring the min/max constraints applied later.
- |||
- ),
+ )
+ ||| % {
+ aggregation_labels: $._config.alert_aggregation_labels,
+ cluster_label: $._config.per_cluster_label,
+ hpa_prefix: $._config.autoscaling_hpa_prefix,
+ hpa_name: $._config.autoscaling[componentName].hpa_name,
+ namespace: $.namespaceMatcher(),
+ scaling_metric_id: scalingMetricID,
+ },
+ ], [
+ '{{ scaler }}',
+ ]
+ ) +
+ $.panelDescription(
+ title,
+ |||
+ This panel shows the scaling metric exposed by KEDA divided by the target/threshold used.
+ It should represent the desired number of replicas, ignoring the min/max constraints applied later.
+ |||
+ ),
+
+ // The provided componentName should be the name of a component among the ones defined in $._config.autoscaling.
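+  // Example usage: $.autoScalingFailuresPanel('ruler_querier').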
+ autoScalingFailuresPanel(componentName)::
+ local title = 'Autoscaler failures rate';
+
+ $.timeseriesPanel(title) +
+ $.queryPanel(
+ $.filterKedaScalerErrorsByHPA($._config.autoscaling[componentName].hpa_name),
+ '{{scaler}} failures'
+ ) +
+ $.panelDescription(
+ title,
+ |||
+ The rate of failures in the KEDA custom metrics API server. Whenever an error occurs, the KEDA custom
+        metrics server is unable to query the scaling metric from Prometheus, so the autoscaler won't work properly.
+ |||
+ ),
+
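+  // Builds the '<component> - autoscaling' dashboard row out of the panels above.
+  // The componentTitle is lower-cased and its dashes are turned into underscores to look up the
+  // matching entry in $._config.autoscaling (for example, 'Ruler-Querier' becomes 'ruler_querier').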
+ cpuAndMemoryBasedAutoScalingRow(componentTitle)::
+ local componentName = std.strReplace(std.asciiLower(componentTitle), '-', '_');
+ super.row('%s - autoscaling' % [componentTitle])
+ .addPanel(
+ $.autoScalingActualReplicas(componentName)
)
.addPanel(
- local title = 'Scaling metric (memory): Desired replicas';
- $.timeseriesPanel(title) +
- $.queryPanel(
- [
- |||
- sum by (scaler) (
- label_replace(
- keda_scaler_metrics_value{%(cluster_label)s=~"$cluster", exported_namespace=~"$namespace", scaler=~".*memory.*"},
- "namespace", "$1", "exported_namespace", "(.*)"
- )
- /
- on(%(aggregation_labels)s, scaledObject, metric) group_left label_replace(
- label_replace(
- kube_horizontalpodautoscaler_spec_target_metric{%(namespace)s, horizontalpodautoscaler=~"%(hpa_name)s"},
- "metric", "$1", "metric_name", "(.+)"
- ),
- "scaledObject", "$1", "horizontalpodautoscaler", "%(hpa_prefix)s(.*)"
- )
- )
- ||| % {
- aggregation_labels: $._config.alert_aggregation_labels,
- cluster_label: $._config.per_cluster_label,
- hpa_prefix: $._config.autoscaling_hpa_prefix,
- hpa_name: $._config.autoscaling[field].hpa_name,
- namespace: $.namespaceMatcher(),
- },
- ], [
- '{{ scaler }}',
- ]
- ) +
- $.panelDescription(
- title,
- |||
- This panel shows the scaling metric exposed by KEDA divided by the target/threshold used.
- It should represent the desired number of replicas, ignoring the min/max constraints applied later.
- |||
- ),
+ $.autoScalingDesiredReplicasByScalingMetricPanel(componentName, 'CPU', 'cpu')
)
.addPanel(
- local title = 'Autoscaler failures rate';
- $.timeseriesPanel(title) +
- $.queryPanel(
- $.filterKedaScalerErrorsByHPA($._config.autoscaling[field].hpa_name),
- '{{scaler}} failures'
- ) +
- $.panelDescription(
- title,
- |||
- The rate of failures in the KEDA custom metrics API server. Whenever an error occurs, the KEDA custom
- metrics server is unable to query the scaling metric from Prometheus so the autoscaler woudln't work properly.
- |||
- ),
+ $.autoScalingDesiredReplicasByScalingMetricPanel(componentName, 'memory', 'memory')
+ )
+ .addPanel(
+ $.autoScalingFailuresPanel(componentName)
),
newStatPanel(queries, legends='', unit='percentunit', decimals=1, thresholds=[], instant=false, novalue='')::
diff --git a/operations/mimir-mixin/dashboards/queries.libsonnet b/operations/mimir-mixin/dashboards/queries.libsonnet
index 0e78156d2e6..93d605b0706 100644
--- a/operations/mimir-mixin/dashboards/queries.libsonnet
+++ b/operations/mimir-mixin/dashboards/queries.libsonnet
@@ -240,10 +240,10 @@ local filename = 'mimir-queries.json';
) +
$.queryPanel(
[
- 'max(max_over_time(cortex_ingest_storage_strong_consistency_wait_duration_seconds{%s,quantile="0.5"}[$__rate_interval]))' % [$.jobMatcher($._config.job_names.ingester)],
- 'max(max_over_time(cortex_ingest_storage_strong_consistency_wait_duration_seconds{%s,quantile="0.99"}[$__rate_interval]))' % [$.jobMatcher($._config.job_names.ingester)],
- 'max(max_over_time(cortex_ingest_storage_strong_consistency_wait_duration_seconds{%s,quantile="0.999"}[$__rate_interval]))' % [$.jobMatcher($._config.job_names.ingester)],
- 'max(max_over_time(cortex_ingest_storage_strong_consistency_wait_duration_seconds{%s,quantile="1.0"}[$__rate_interval]))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.5, sum(rate(cortex_ingest_storage_strong_consistency_wait_duration_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.99, sum(rate(cortex_ingest_storage_strong_consistency_wait_duration_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.999, sum(rate(cortex_ingest_storage_strong_consistency_wait_duration_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(1.0, sum(rate(cortex_ingest_storage_strong_consistency_wait_duration_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
],
[
'50th percentile',
@@ -300,10 +300,10 @@ local filename = 'mimir-queries.json';
) +
$.queryPanel(
[
- 'max(max_over_time(cortex_ingest_storage_reader_last_produced_offset_request_duration_seconds{%s,quantile="0.5"}[$__rate_interval]))' % [$.jobMatcher($._config.job_names.ingester)],
- 'max(max_over_time(cortex_ingest_storage_reader_last_produced_offset_request_duration_seconds{%s,quantile="0.99"}[$__rate_interval]))' % [$.jobMatcher($._config.job_names.ingester)],
- 'max(max_over_time(cortex_ingest_storage_reader_last_produced_offset_request_duration_seconds{%s,quantile="0.999"}[$__rate_interval]))' % [$.jobMatcher($._config.job_names.ingester)],
- 'max(max_over_time(cortex_ingest_storage_reader_last_produced_offset_request_duration_seconds{%s,quantile="1.0"}[$__rate_interval]))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.5, sum(rate(cortex_ingest_storage_reader_last_produced_offset_request_duration_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.99, sum(rate(cortex_ingest_storage_reader_last_produced_offset_request_duration_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.999, sum(rate(cortex_ingest_storage_reader_last_produced_offset_request_duration_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(1.0, sum(rate(cortex_ingest_storage_reader_last_produced_offset_request_duration_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
],
[
'50th percentile',
diff --git a/operations/mimir-mixin/dashboards/remote-ruler-reads-networking.libsonnet b/operations/mimir-mixin/dashboards/remote-ruler-reads-networking.libsonnet
new file mode 100644
index 00000000000..87d100d2c2e
--- /dev/null
+++ b/operations/mimir-mixin/dashboards/remote-ruler-reads-networking.libsonnet
@@ -0,0 +1,22 @@
+local utils = import 'mixin-utils/utils.libsonnet';
+local filename = 'mimir-remote-ruler-reads-networking.json';
+
+(import 'dashboard-utils.libsonnet') {
+ [filename]:
+ assert std.md5(filename) == '9e8cfff65f91632f8a25981c6fe44bc9' : 'UID of the dashboard has changed, please update references to dashboard.';
+ ($.dashboard('Remote ruler reads networking') + { uid: std.md5(filename) })
+ .addClusterSelectorTemplates(false)
+ .addRow($.containerNetworkingRowByComponent('Summary', 'remote_ruler_read'))
+ .addRow($.containerNetworkingRowByComponent('Ruler-query-frontend', 'ruler_query_frontend'))
+ .addRow($.containerNetworkingRowByComponent('Ruler-query-scheduler', 'ruler_query_scheduler'))
+ .addRow($.containerNetworkingRowByComponent('Ruler-querier', 'ruler_querier'))
+ + {
+ templating+: {
+ list: [
+        // Do not allow selecting all namespaces.
+ l + (if (l.name == 'namespace') then { includeAll: false } else {})
+ for l in super.list
+ ],
+ },
+ },
+}
diff --git a/operations/mimir-mixin/dashboards/remote-ruler-reads-resources.libsonnet b/operations/mimir-mixin/dashboards/remote-ruler-reads-resources.libsonnet
index ee82ed22013..df04952f29d 100644
--- a/operations/mimir-mixin/dashboards/remote-ruler-reads-resources.libsonnet
+++ b/operations/mimir-mixin/dashboards/remote-ruler-reads-resources.libsonnet
@@ -7,7 +7,7 @@ local filename = 'mimir-remote-ruler-reads-resources.json';
($.dashboard('Remote ruler reads resources') + { uid: std.md5(filename) })
.addClusterSelectorTemplates(false)
.addRow(
- $.row('Query-frontend (dedicated to ruler)')
+ $.row('Ruler-query-frontend')
.addPanel(
$.containerCPUUsagePanelByComponent('ruler_query_frontend'),
)
@@ -19,7 +19,7 @@ local filename = 'mimir-remote-ruler-reads-resources.json';
)
)
.addRow(
- $.row('Query-scheduler (dedicated to ruler)')
+ $.row('Ruler-query-scheduler')
.addPanel(
$.containerCPUUsagePanelByComponent('ruler_query_scheduler'),
)
@@ -31,7 +31,7 @@ local filename = 'mimir-remote-ruler-reads-resources.json';
)
)
.addRow(
- $.row('Querier (dedicated to ruler)')
+ $.row('Ruler-querier')
.addPanel(
$.containerCPUUsagePanelByComponent('ruler_querier'),
)
diff --git a/operations/mimir-mixin/dashboards/remote-ruler-reads.libsonnet b/operations/mimir-mixin/dashboards/remote-ruler-reads.libsonnet
index df1f48f4aa8..87164d273da 100644
--- a/operations/mimir-mixin/dashboards/remote-ruler-reads.libsonnet
+++ b/operations/mimir-mixin/dashboards/remote-ruler-reads.libsonnet
@@ -54,7 +54,7 @@ local filename = 'mimir-remote-ruler-reads.json';
)
)
.addRow(
- $.row('Query-frontend (dedicated to ruler)')
+ $.row('Ruler-query-frontend')
.addPanel(
$.timeseriesPanel('Requests / sec') +
$.qpsPanel('cortex_request_duration_seconds_count{%s, route=~"%s"}' % [$.jobMatcher($._config.job_names.ruler_query_frontend), rulerRoutesRegex])
@@ -80,7 +80,7 @@ local filename = 'mimir-remote-ruler-reads.json';
these panels will show "No data."
|||;
- $.row('Query-scheduler (dedicated to ruler)')
+ $.row('Ruler-query-scheduler')
.addPanel(
local title = 'Requests / sec';
$.timeseriesPanel(title) +
@@ -127,7 +127,7 @@ local filename = 'mimir-remote-ruler-reads.json';
regex: '^$',
},
];
- $.row('Query-scheduler Latency (Time in Queue) Breakout by Additional Queue Dimensions')
+ $.row('Ruler-query-scheduler Latency (Time in Queue) Breakout by Additional Queue Dimensions')
.addPanel(
local title = '99th Percentile Latency by Queue Dimension';
$.timeseriesPanel(title) +
@@ -169,7 +169,7 @@ local filename = 'mimir-remote-ruler-reads.json';
)
)
.addRow(
- $.row('Querier (dedicated to ruler)')
+ $.row('Ruler-querier')
.addPanel(
$.timeseriesPanel('Requests / sec') +
$.qpsPanel('cortex_querier_request_duration_seconds_count{%s, route=~"%s"}' % [$.jobMatcher($._config.job_names.ruler_querier), $.queries.read_http_routes_regex])
@@ -187,6 +187,25 @@ local filename = 'mimir-remote-ruler-reads.json';
)
.addRowIf(
$._config.autoscaling.ruler_querier.enabled,
- $.cpuAndMemoryBasedAutoScalingRow('Ruler-Querier'),
+ $.row('Ruler-querier - autoscaling')
+ .addPanel(
+ $.autoScalingActualReplicas('ruler_querier')
+ )
+ .addPanel(
+ $.autoScalingFailuresPanel('ruler_querier')
+ )
+ )
+ .addRowIf(
+ $._config.autoscaling.ruler_querier.enabled,
+ $.row('')
+ .addPanel(
+ $.autoScalingDesiredReplicasByScalingMetricPanel('ruler_querier', 'CPU', 'cpu')
+ )
+ .addPanel(
+ $.autoScalingDesiredReplicasByScalingMetricPanel('ruler_querier', 'memory', 'memory')
+ )
+ .addPanel(
+ $.autoScalingDesiredReplicasByScalingMetricPanel('ruler_querier', 'in-flight queries', 'queries')
+ )
),
}
diff --git a/operations/mimir-mixin/dashboards/slow-queries.libsonnet b/operations/mimir-mixin/dashboards/slow-queries.libsonnet
index b133f01f16c..b1ed99adc65 100644
--- a/operations/mimir-mixin/dashboards/slow-queries.libsonnet
+++ b/operations/mimir-mixin/dashboards/slow-queries.libsonnet
@@ -7,7 +7,7 @@ local filename = 'mimir-slow-queries.json';
($.dashboard('Slow queries') + { uid: std.md5(filename) })
.addClusterSelectorTemplates(false)
.addRow(
- $.row('Accross tenants')
+ $.row('Across tenants')
.addPanel(
$.timeseriesPanel('Response time') +
$.lokiMetricsQueryPanel(
diff --git a/operations/mimir-mixin/dashboards/top-tenants.libsonnet b/operations/mimir-mixin/dashboards/top-tenants.libsonnet
index 4359ee3d7e0..96eab666278 100644
--- a/operations/mimir-mixin/dashboards/top-tenants.libsonnet
+++ b/operations/mimir-mixin/dashboards/top-tenants.libsonnet
@@ -58,8 +58,10 @@ local filename = 'mimir-top-tenants.json';
distributor: $.jobMatcher($._config.job_names.distributor),
group_by_cluster: $._config.group_by_cluster,
},
- ],
- { 'Value #A': { alias: 'series' } }
+ ], {
+ user: { alias: 'user', unit: 'string' },
+ Value: { alias: 'series' },
+ }
)
),
)
@@ -72,8 +74,10 @@ local filename = 'mimir-top-tenants.json';
$.tablePanel(
[
'topk($limit, %(in_memory_series_per_user)s)' % { in_memory_series_per_user: in_memory_series_per_user_query() },
- ],
- { 'Value #A': { alias: 'series' } }
+ ], {
+ user: { alias: 'user', unit: 'string' },
+ Value: { alias: 'series' },
+ }
)
),
)
@@ -107,8 +111,10 @@ local filename = 'mimir-top-tenants.json';
[
'topk($limit, sum by (user) (rate(cortex_distributor_received_samples_total{%(job)s}[5m])))'
% { job: $.jobMatcher($._config.job_names.distributor) },
- ],
- { 'Value #A': { alias: 'samples/s' } }
+ ], {
+ user: { alias: 'user', unit: 'string' },
+ Value: { alias: 'samples/s' },
+ }
)
),
)
@@ -143,8 +149,10 @@ local filename = 'mimir-top-tenants.json';
[
'topk($limit, sum by (user) (rate(cortex_discarded_samples_total{%(job)s}[5m])))'
% { job: $.jobMatcher($._config.job_names.ingester + $._config.job_names.distributor) },
- ],
- { 'Value #A': { alias: 'samples/s' } }
+ ], {
+ user: { alias: 'user', unit: 'string' },
+ Value: { alias: 'samples/s' },
+ }
)
),
)
@@ -190,8 +198,10 @@ local filename = 'mimir-top-tenants.json';
distributor: $.jobMatcher($._config.job_names.distributor),
group_by_cluster: $._config.group_by_cluster,
},
- ],
- { 'Value #A': { alias: 'series' } }
+ ], {
+ user: { alias: 'user', unit: 'string' },
+ Value: { alias: 'series' },
+ }
)
),
)
@@ -205,8 +215,10 @@ local filename = 'mimir-top-tenants.json';
[
'topk($limit, sum by (user) (rate(cortex_distributor_received_exemplars_total{%(job)s}[5m])))'
% { job: $.jobMatcher($._config.job_names.distributor) },
- ],
- { 'Value #A': { alias: 'exemplars/s' } }
+ ], {
+ user: { alias: 'user', unit: 'string' },
+ Value: { alias: 'exemplars/s' },
+ }
)
),
)
@@ -221,8 +233,10 @@ local filename = 'mimir-top-tenants.json';
[
'topk($limit, sum by (rule_group, user) (cortex_prometheus_rule_group_rules{%(job)s}))'
% { job: $.jobMatcher($._config.job_names.ruler) },
- ],
- { 'Value #A': { alias: 'rules' } }
+ ], {
+ user: { alias: 'user', unit: 'string' },
+ Value: { alias: 'rules' },
+ }
)
),
)
@@ -236,8 +250,10 @@ local filename = 'mimir-top-tenants.json';
[
'topk($limit, sum by (rule_group, user) (cortex_prometheus_rule_group_last_duration_seconds{%(job)s}))'
% { job: $.jobMatcher($._config.job_names.ruler) },
- ],
- { 'Value #A': { alias: 'seconds' } }
+ ], {
+ user: { alias: 'user', unit: 'string' },
+ Value: { alias: 'seconds' },
+ }
)
)
)
@@ -256,8 +272,10 @@ local filename = 'mimir-top-tenants.json';
(sum(rate(cortex_bucket_index_estimated_compaction_jobs_errors_total{%s}[$__rate_interval])) == 0)
)
||| % [$.jobMatcher($._config.job_names.compactor), $.jobMatcher($._config.job_names.compactor)],
- ],
- { Value: { alias: 'Compaction Jobs', decimals: 0 } }
+ ], {
+ user: { alias: 'user', unit: 'string' },
+ Value: { alias: 'Compaction Jobs', decimals: 0 },
+ }
)
),
),
diff --git a/operations/mimir-mixin/dashboards/writes.libsonnet b/operations/mimir-mixin/dashboards/writes.libsonnet
index fae071cefb4..61205581756 100644
--- a/operations/mimir-mixin/dashboards/writes.libsonnet
+++ b/operations/mimir-mixin/dashboards/writes.libsonnet
@@ -174,10 +174,10 @@ local filename = 'mimir-writes.json';
) +
$.queryPanel(
[
- 'max(cortex_ingest_storage_writer_latency_seconds{%s,quantile="0.5"})' % [$.jobMatcher($._config.job_names.distributor)],
- 'max(cortex_ingest_storage_writer_latency_seconds{%s,quantile="0.99"})' % [$.jobMatcher($._config.job_names.distributor)],
- 'max(cortex_ingest_storage_writer_latency_seconds{%s,quantile="0.999"})' % [$.jobMatcher($._config.job_names.distributor)],
- 'max(cortex_ingest_storage_writer_latency_seconds{%s,quantile="1.0"})' % [$.jobMatcher($._config.job_names.distributor)],
+ 'histogram_quantile(0.5, sum(rate(cortex_ingest_storage_writer_latency_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.distributor)],
+ 'histogram_quantile(0.99, sum(rate(cortex_ingest_storage_writer_latency_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.distributor)],
+ 'histogram_quantile(0.999, sum(rate(cortex_ingest_storage_writer_latency_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.distributor)],
+ 'histogram_quantile(1.0, sum(rate(cortex_ingest_storage_writer_latency_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.distributor)],
],
[
'50th percentile',
@@ -301,10 +301,41 @@ local filename = 'mimir-writes.json';
) +
$.queryPanel(
[
- 'max(cortex_ingest_storage_reader_processing_time_seconds{%s,quantile="0.5"})' % [$.jobMatcher($._config.job_names.ingester)],
- 'max(cortex_ingest_storage_reader_processing_time_seconds{%s,quantile="0.99"})' % [$.jobMatcher($._config.job_names.ingester)],
- 'max(cortex_ingest_storage_reader_processing_time_seconds{%s,quantile="0.999"})' % [$.jobMatcher($._config.job_names.ingester)],
- 'max(cortex_ingest_storage_reader_processing_time_seconds{%s,quantile="1.0"})' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.5, sum(rate(cortex_ingest_storage_reader_processing_time_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.99, sum(rate(cortex_ingest_storage_reader_processing_time_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.999, sum(rate(cortex_ingest_storage_reader_processing_time_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(1.0, sum(rate(cortex_ingest_storage_reader_processing_time_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ ],
+ [
+ '50th percentile',
+ '99th percentile',
+ '99.9th percentile',
+ '100th percentile',
+ ],
+ ) + {
+ fieldConfig+: {
+ defaults+: { unit: 's' },
+ },
+ },
+ )
+ )
+ .addRowIf(
+ $._config.show_ingest_storage_panels,
+    ($.row('Ingester (ingest storage - end-to-end latency)'))
+ .addPanel(
+ $.timeseriesPanel('Kafka record end-to-end latency when ingesters are running') +
+ $.panelDescription(
+ 'Kafka record end-to-end latency when ingesters are running',
+ |||
+            Time between the distributor writing a request to Kafka and the ingester reading the record, while ingesters are running.
+ |||
+ ) +
+ $.queryPanel(
+ [
+ 'histogram_quantile(0.5, sum(rate(cortex_ingest_storage_reader_receive_delay_seconds{%s, phase="running"}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.99, sum(rate(cortex_ingest_storage_reader_receive_delay_seconds{%s, phase="running"}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.999, sum(rate(cortex_ingest_storage_reader_receive_delay_seconds{%s, phase="running"}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(1.0, sum(rate(cortex_ingest_storage_reader_receive_delay_seconds{%s, phase="running"}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
],
[
'50th percentile',
@@ -319,19 +350,74 @@ local filename = 'mimir-writes.json';
},
)
.addPanel(
- $.timeseriesPanel('Kafka record end-to-end latency') +
+ $.timeseriesPanel('Kafka record end-to-end latency when starting') +
$.panelDescription(
- 'Kafka record end-to-end latency',
+ 'Kafka record end-to-end latency when starting',
+ |||
+            Time between the distributor writing a request to Kafka and the ingester reading the record during the catch-up phase, while ingesters are starting.
+ If ingesters are not starting and catching up in the selected time range, this panel will be empty.
+ |||
+ ) +
+ $.queryPanel(
+ [
+ 'histogram_quantile(0.5, sum(rate(cortex_ingest_storage_reader_receive_delay_seconds{%s, phase="starting"}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.99, sum(rate(cortex_ingest_storage_reader_receive_delay_seconds{%s, phase="starting"}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.999, sum(rate(cortex_ingest_storage_reader_receive_delay_seconds{%s, phase="starting"}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(1.0, sum(rate(cortex_ingest_storage_reader_receive_delay_seconds{%s, phase="starting"}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ ],
+ [
+ '50th percentile',
+ '99th percentile',
+ '99.9th percentile',
+ '100th percentile',
+ ],
+ ) + {
+ fieldConfig+: {
+ defaults+: { unit: 's' },
+ },
+ },
+ )
+ )
+ .addRowIf(
+ $._config.show_ingest_storage_panels,
+ ($.row('Ingester (ingest storage - last consumed offset)'))
+ .addPanel(
+ $.timeseriesPanel('Last consumed offset commits / sec') +
+ $.panelDescription(
+ 'Last consumed offset commits / sec',
+ |||
+ Rate of "last consumed offset" commits issued by ingesters to Kafka.
+ |||
+ ) +
+ $.queryPanel(
+ [
+ |||
+ sum (rate (cortex_ingest_storage_reader_offset_commit_requests_total{%s}[$__rate_interval]))
+ -
+ sum (rate (cortex_ingest_storage_reader_offset_commit_failures_total{%s}[$__rate_interval]))
+ ||| % [$.jobMatcher($._config.job_names.ingester), $.jobMatcher($._config.job_names.ingester)],
+ 'sum (rate (cortex_ingest_storage_reader_offset_commit_failures_total{%s}[$__rate_interval]))' % [$.jobMatcher($._config.job_names.ingester)],
+ ],
+ [
+ 'successful',
+ 'failed',
+ ],
+ ) + $.aliasColors({ successful: $._colors.success, failed: $._colors.failed }) + $.stack,
+ )
+ .addPanel(
+ $.timeseriesPanel('Last consumed offset commits latency') +
+ $.panelDescription(
+          'Last consumed offset commits latency',
|||
- Time between writing request by distributor to Kafka and reading the record by ingester.
+          Time spent by ingesters to commit the "last consumed offset" to Kafka.
|||
) +
$.queryPanel(
[
- 'max(cortex_ingest_storage_reader_receive_delay_seconds{%s,quantile="0.5"})' % [$.jobMatcher($._config.job_names.ingester)],
- 'max(cortex_ingest_storage_reader_receive_delay_seconds{%s,quantile="0.99"})' % [$.jobMatcher($._config.job_names.ingester)],
- 'max(cortex_ingest_storage_reader_receive_delay_seconds{%s,quantile="0.999"})' % [$.jobMatcher($._config.job_names.ingester)],
- 'max(cortex_ingest_storage_reader_receive_delay_seconds{%s,quantile="1.0"})' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.5, sum(rate(cortex_ingest_storage_reader_offset_commit_request_duration_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.99, sum(rate(cortex_ingest_storage_reader_offset_commit_request_duration_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(0.999, sum(rate(cortex_ingest_storage_reader_offset_commit_request_duration_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
+ 'histogram_quantile(1.0, sum(rate(cortex_ingest_storage_reader_offset_commit_request_duration_seconds{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
],
[
'50th percentile',
diff --git a/operations/mimir-mixin/recording_rules.libsonnet b/operations/mimir-mixin/recording_rules.libsonnet
index 567bae04989..5f0bb2a68d4 100644
--- a/operations/mimir-mixin/recording_rules.libsonnet
+++ b/operations/mimir-mixin/recording_rules.libsonnet
@@ -333,9 +333,9 @@ local utils = import 'mixin-utils/utils.libsonnet';
rules: [
{
// cortex_ingester_ingested_samples_total is per user, in this rule we want to see the sum per cluster/namespace/instance
- record: '%s_%s:cortex_ingester_ingested_samples_total:rate1m' % [$._config.alert_aggregation_rule_prefix, $._config.per_instance_label],
+ record: '%s_%s:cortex_ingester_ingested_samples_total:rate%s' % [$._config.alert_aggregation_rule_prefix, $._config.per_instance_label, $._config.recording_rules_range_interval],
expr: |||
- sum by(%(alert_aggregation_labels)s, %(per_instance_label)s) (rate(cortex_ingester_ingested_samples_total[1m]))
+ sum by(%(alert_aggregation_labels)s, %(per_instance_label)s) (rate(cortex_ingester_ingested_samples_total[%(recording_rules_range_interval)s]))
||| % $._config,
},
],
diff --git a/operations/mimir-rules-action/Dockerfile b/operations/mimir-rules-action/Dockerfile
index 7a84e34eada..fb4eb638ace 100644
--- a/operations/mimir-rules-action/Dockerfile
+++ b/operations/mimir-rules-action/Dockerfile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: AGPL-3.0-only
-FROM grafana/mimirtool:2.11.0
+FROM grafana/mimirtool:2.12.0
COPY entrypoint.sh /entrypoint.sh
diff --git a/operations/mimir-tests/test-all-components-generated.yaml b/operations/mimir-tests/test-all-components-generated.yaml
index ca450b33c80..61d6c51a63d 100644
--- a/operations/mimir-tests/test-all-components-generated.yaml
+++ b/operations/mimir-tests/test-all-components-generated.yaml
@@ -534,7 +534,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -631,7 +631,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -711,7 +711,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -783,7 +783,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -875,7 +875,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -952,7 +952,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1051,7 +1051,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1160,7 +1160,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1254,7 +1254,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1308,7 +1308,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1362,7 +1362,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1416,7 +1416,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1460,14 +1460,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1501,7 +1501,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-all-components-with-custom-max-skew-generated.yaml b/operations/mimir-tests/test-all-components-with-custom-max-skew-generated.yaml
index fabca78c1a4..1ca5854d4a7 100644
--- a/operations/mimir-tests/test-all-components-with-custom-max-skew-generated.yaml
+++ b/operations/mimir-tests/test-all-components-with-custom-max-skew-generated.yaml
@@ -534,7 +534,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -631,7 +631,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -711,7 +711,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -783,7 +783,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -875,7 +875,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -952,7 +952,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1051,7 +1051,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1160,7 +1160,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1254,7 +1254,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1308,7 +1308,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1362,7 +1362,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1416,7 +1416,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1460,14 +1460,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1501,7 +1501,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-all-components-with-tsdb-head-early-compaction-generated.yaml b/operations/mimir-tests/test-all-components-with-tsdb-head-early-compaction-generated.yaml
index 79468264f78..ac7810c5f9f 100644
--- a/operations/mimir-tests/test-all-components-with-tsdb-head-early-compaction-generated.yaml
+++ b/operations/mimir-tests/test-all-components-with-tsdb-head-early-compaction-generated.yaml
@@ -534,7 +534,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -631,7 +631,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -711,7 +711,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -783,7 +783,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -875,7 +875,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -952,7 +952,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1051,7 +1051,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1162,7 +1162,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1256,7 +1256,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1310,7 +1310,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1364,7 +1364,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1418,7 +1418,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1462,14 +1462,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1503,7 +1503,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-all-components-without-chunk-streaming-generated.yaml b/operations/mimir-tests/test-all-components-without-chunk-streaming-generated.yaml
index 84cc10165c2..a45518ad899 100644
--- a/operations/mimir-tests/test-all-components-without-chunk-streaming-generated.yaml
+++ b/operations/mimir-tests/test-all-components-without-chunk-streaming-generated.yaml
@@ -535,7 +535,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -632,7 +632,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -712,7 +712,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -784,7 +784,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -876,7 +876,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -953,7 +953,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1052,7 +1052,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1161,7 +1161,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1255,7 +1255,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1309,7 +1309,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1363,7 +1363,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1417,7 +1417,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1461,14 +1461,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1502,7 +1502,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-automated-downscale-generated.yaml b/operations/mimir-tests/test-automated-downscale-generated.yaml
index 4745847461d..6be1a7d4ee4 100644
--- a/operations/mimir-tests/test-automated-downscale-generated.yaml
+++ b/operations/mimir-tests/test-automated-downscale-generated.yaml
@@ -711,7 +711,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -810,7 +810,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -890,7 +890,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -962,7 +962,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1100,7 +1100,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1177,7 +1177,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1276,7 +1276,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1402,7 +1402,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1528,7 +1528,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1654,7 +1654,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1748,7 +1748,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1802,7 +1802,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1856,7 +1856,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1910,7 +1910,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1969,14 +1969,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2013,7 +2013,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -2112,14 +2112,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2156,7 +2156,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -2255,14 +2255,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2299,7 +2299,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-autoscaling-custom-target-utilization-generated.yaml b/operations/mimir-tests/test-autoscaling-custom-target-utilization-generated.yaml
index 256de50cd78..538bb62312c 100644
--- a/operations/mimir-tests/test-autoscaling-custom-target-utilization-generated.yaml
+++ b/operations/mimir-tests/test-autoscaling-custom-target-utilization-generated.yaml
@@ -650,7 +650,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -746,7 +746,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -825,7 +825,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -897,7 +897,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -990,7 +990,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1083,7 +1083,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-querier
ports:
@@ -1165,7 +1165,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-query-frontend
ports:
@@ -1237,7 +1237,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-query-scheduler
ports:
@@ -1304,7 +1304,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1403,7 +1403,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1512,7 +1512,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1606,7 +1606,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1660,7 +1660,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1714,7 +1714,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1768,7 +1768,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1812,14 +1812,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1853,7 +1853,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -1957,6 +1957,7 @@ spec:
name: alertmanager
triggers:
- metadata:
+ ignoreNullValues: "false"
metricName: cortex_alertmanager_cpu_hpa_default
query: |
max_over_time(
@@ -1966,11 +1967,20 @@ spec:
max by (pod) (up{container="alertmanager",namespace="default"}) > 0
)[15m:]
) * 1000
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_cpu_usage_seconds_total{container="alertmanager",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "1780"
name: cortex_alertmanager_cpu_hpa_default
type: prometheus
- metadata:
+ ignoreNullValues: "false"
metricName: cortex_alertmanager_memory_hpa_default
query: |
max_over_time(
@@ -1991,6 +2001,14 @@ spec:
max by (pod) (kube_pod_container_status_last_terminated_reason{container="alertmanager", namespace="default", reason="OOMKilled"})
or vector(0)
)
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_memory_working_set_bytes{container="alertmanager",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "9556302233"
name: cortex_alertmanager_memory_hpa_default
@@ -2017,6 +2035,7 @@ spec:
name: distributor
triggers:
- metadata:
+ ignoreNullValues: "false"
metricName: cortex_distributor_cpu_hpa_default
query: |
max_over_time(
@@ -2026,11 +2045,20 @@ spec:
max by (pod) (up{container="distributor",namespace="default"}) > 0
)[15m:]
) * 1000
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_cpu_usage_seconds_total{container="distributor",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "1780"
name: cortex_distributor_cpu_hpa_default
type: prometheus
- metadata:
+ ignoreNullValues: "false"
metricName: cortex_distributor_memory_hpa_default
query: |
max_over_time(
@@ -2051,6 +2079,14 @@ spec:
max by (pod) (kube_pod_container_status_last_terminated_reason{container="distributor", namespace="default", reason="OOMKilled"})
or vector(0)
)
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_memory_working_set_bytes{container="distributor",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "3058016714"
name: cortex_distributor_memory_hpa_default
@@ -2122,6 +2158,7 @@ spec:
name: query-frontend
triggers:
- metadata:
+ ignoreNullValues: "false"
metricName: query_frontend_cpu_hpa_default
query: |
max_over_time(
@@ -2131,11 +2168,20 @@ spec:
max by (pod) (up{container="query-frontend",namespace="default"}) > 0
)[15m:]
) * 1000
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_cpu_usage_seconds_total{container="query-frontend",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "2225"
name: query_frontend_cpu_hpa_default
type: prometheus
- metadata:
+ ignoreNullValues: "false"
metricName: query_frontend_memory_hpa_default
query: |
max_over_time(
@@ -2156,6 +2202,14 @@ spec:
max by (pod) (kube_pod_container_status_last_terminated_reason{container="query-frontend", namespace="default", reason="OOMKilled"})
or vector(0)
)
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_memory_working_set_bytes{container="query-frontend",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "559939584"
name: query_frontend_memory_hpa_default
@@ -2182,6 +2236,7 @@ spec:
name: ruler
triggers:
- metadata:
+ ignoreNullValues: "false"
metricName: ruler_cpu_hpa_default
query: |
max_over_time(
@@ -2191,11 +2246,20 @@ spec:
max by (pod) (up{container="ruler",namespace="default"}) > 0
)[15m:]
) * 1000
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_cpu_usage_seconds_total{container="ruler",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "890"
name: ruler_cpu_hpa_default
type: prometheus
- metadata:
+ ignoreNullValues: "false"
metricName: ruler_memory_hpa_default
query: |
max_over_time(
@@ -2216,6 +2280,14 @@ spec:
max by (pod) (kube_pod_container_status_last_terminated_reason{container="ruler", namespace="default", reason="OOMKilled"})
or vector(0)
)
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_memory_working_set_bytes{container="ruler",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "5733781340"
name: ruler_memory_hpa_default
@@ -2242,6 +2314,7 @@ spec:
name: ruler-querier
triggers:
- metadata:
+ ignoreNullValues: "false"
metricName: ruler_querier_cpu_hpa_default
query: |
max_over_time(
@@ -2251,11 +2324,20 @@ spec:
max by (pod) (up{container="ruler-querier",namespace="default"}) > 0
)[15m:]
) * 1000
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_cpu_usage_seconds_total{container="ruler-querier",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "178"
name: ruler_querier_cpu_hpa_default
type: prometheus
- metadata:
+ ignoreNullValues: "false"
metricName: ruler_querier_memory_hpa_default
query: |
max_over_time(
@@ -2276,6 +2358,14 @@ spec:
max by (pod) (kube_pod_container_status_last_terminated_reason{container="ruler-querier", namespace="default", reason="OOMKilled"})
or vector(0)
)
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_memory_working_set_bytes{container="ruler-querier",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "955630223"
name: ruler_querier_memory_hpa_default
@@ -2302,6 +2392,7 @@ spec:
name: ruler-query-frontend
triggers:
- metadata:
+ ignoreNullValues: "false"
metricName: ruler_query_frontend_cpu_hpa_default
query: |
max_over_time(
@@ -2311,11 +2402,20 @@ spec:
max by (pod) (up{container="ruler-query-frontend",namespace="default"}) > 0
)[15m:]
) * 1000
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_cpu_usage_seconds_total{container="ruler-query-frontend",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "1780"
name: ruler_query_frontend_cpu_hpa_default
type: prometheus
- metadata:
+ ignoreNullValues: "false"
metricName: ruler_query_frontend_memory_hpa_default
query: |
max_over_time(
@@ -2336,6 +2436,14 @@ spec:
max by (pod) (kube_pod_container_status_last_terminated_reason{container="ruler-query-frontend", namespace="default", reason="OOMKilled"})
or vector(0)
)
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_memory_working_set_bytes{container="ruler-query-frontend",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "559939584"
name: ruler_query_frontend_memory_hpa_default
diff --git a/operations/mimir-tests/test-autoscaling-generated.yaml b/operations/mimir-tests/test-autoscaling-generated.yaml
index dbc00d880b1..23c82f37404 100644
--- a/operations/mimir-tests/test-autoscaling-generated.yaml
+++ b/operations/mimir-tests/test-autoscaling-generated.yaml
@@ -650,7 +650,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -746,7 +746,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -825,7 +825,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -897,7 +897,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -990,7 +990,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1083,7 +1083,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-querier
ports:
@@ -1165,7 +1165,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-query-frontend
ports:
@@ -1237,7 +1237,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-query-scheduler
ports:
@@ -1304,7 +1304,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1403,7 +1403,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1512,7 +1512,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1606,7 +1606,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1660,7 +1660,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1714,7 +1714,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1768,7 +1768,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1812,14 +1812,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1853,7 +1853,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -1957,6 +1957,7 @@ spec:
name: alertmanager
triggers:
- metadata:
+ ignoreNullValues: "false"
metricName: cortex_alertmanager_cpu_hpa_default
query: |
max_over_time(
@@ -1966,11 +1967,20 @@ spec:
max by (pod) (up{container="alertmanager",namespace="default"}) > 0
)[15m:]
) * 1000
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_cpu_usage_seconds_total{container="alertmanager",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "2000"
name: cortex_alertmanager_cpu_hpa_default
type: prometheus
- metadata:
+ ignoreNullValues: "false"
metricName: cortex_alertmanager_memory_hpa_default
query: |
max_over_time(
@@ -1991,6 +2001,14 @@ spec:
max by (pod) (kube_pod_container_status_last_terminated_reason{container="alertmanager", namespace="default", reason="OOMKilled"})
or vector(0)
)
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_memory_working_set_bytes{container="alertmanager",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "10737418240"
name: cortex_alertmanager_memory_hpa_default
@@ -2017,6 +2035,7 @@ spec:
name: distributor
triggers:
- metadata:
+ ignoreNullValues: "false"
metricName: cortex_distributor_cpu_hpa_default
query: |
max_over_time(
@@ -2026,11 +2045,20 @@ spec:
max by (pod) (up{container="distributor",namespace="default"}) > 0
)[15m:]
) * 1000
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_cpu_usage_seconds_total{container="distributor",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "2000"
name: cortex_distributor_cpu_hpa_default
type: prometheus
- metadata:
+ ignoreNullValues: "false"
metricName: cortex_distributor_memory_hpa_default
query: |
max_over_time(
@@ -2051,6 +2079,14 @@ spec:
max by (pod) (kube_pod_container_status_last_terminated_reason{container="distributor", namespace="default", reason="OOMKilled"})
or vector(0)
)
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_memory_working_set_bytes{container="distributor",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "3435973836"
name: cortex_distributor_memory_hpa_default
@@ -2122,6 +2158,7 @@ spec:
name: query-frontend
triggers:
- metadata:
+ ignoreNullValues: "false"
metricName: query_frontend_cpu_hpa_default
query: |
max_over_time(
@@ -2131,11 +2168,20 @@ spec:
max by (pod) (up{container="query-frontend",namespace="default"}) > 0
)[15m:]
) * 1000
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_cpu_usage_seconds_total{container="query-frontend",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "1875"
name: query_frontend_cpu_hpa_default
type: prometheus
- metadata:
+ ignoreNullValues: "false"
metricName: query_frontend_memory_hpa_default
query: |
max_over_time(
@@ -2156,6 +2202,14 @@ spec:
max by (pod) (kube_pod_container_status_last_terminated_reason{container="query-frontend", namespace="default", reason="OOMKilled"})
or vector(0)
)
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_memory_working_set_bytes{container="query-frontend",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "629145600"
name: query_frontend_memory_hpa_default
@@ -2182,6 +2236,7 @@ spec:
name: ruler
triggers:
- metadata:
+ ignoreNullValues: "false"
metricName: ruler_cpu_hpa_default
query: |
max_over_time(
@@ -2191,11 +2246,20 @@ spec:
max by (pod) (up{container="ruler",namespace="default"}) > 0
)[15m:]
) * 1000
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_cpu_usage_seconds_total{container="ruler",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "1000"
name: ruler_cpu_hpa_default
type: prometheus
- metadata:
+ ignoreNullValues: "false"
metricName: ruler_memory_hpa_default
query: |
max_over_time(
@@ -2216,6 +2280,14 @@ spec:
max by (pod) (kube_pod_container_status_last_terminated_reason{container="ruler", namespace="default", reason="OOMKilled"})
or vector(0)
)
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_memory_working_set_bytes{container="ruler",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "6442450944"
name: ruler_memory_hpa_default
@@ -2242,6 +2314,7 @@ spec:
name: ruler-querier
triggers:
- metadata:
+ ignoreNullValues: "false"
metricName: ruler_querier_cpu_hpa_default
query: |
max_over_time(
@@ -2251,11 +2324,20 @@ spec:
max by (pod) (up{container="ruler-querier",namespace="default"}) > 0
)[15m:]
) * 1000
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_cpu_usage_seconds_total{container="ruler-querier",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "200"
name: ruler_querier_cpu_hpa_default
type: prometheus
- metadata:
+ ignoreNullValues: "false"
metricName: ruler_querier_memory_hpa_default
query: |
max_over_time(
@@ -2276,6 +2358,14 @@ spec:
max by (pod) (kube_pod_container_status_last_terminated_reason{container="ruler-querier", namespace="default", reason="OOMKilled"})
or vector(0)
)
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_memory_working_set_bytes{container="ruler-querier",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "1073741824"
name: ruler_querier_memory_hpa_default
@@ -2302,6 +2392,7 @@ spec:
name: ruler-query-frontend
triggers:
- metadata:
+ ignoreNullValues: "false"
metricName: ruler_query_frontend_cpu_hpa_default
query: |
max_over_time(
@@ -2311,11 +2402,20 @@ spec:
max by (pod) (up{container="ruler-query-frontend",namespace="default"}) > 0
)[15m:]
) * 1000
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_cpu_usage_seconds_total{container="ruler-query-frontend",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "2000"
name: ruler_query_frontend_cpu_hpa_default
type: prometheus
- metadata:
+ ignoreNullValues: "false"
metricName: ruler_query_frontend_memory_hpa_default
query: |
max_over_time(
@@ -2336,6 +2436,14 @@ spec:
max by (pod) (kube_pod_container_status_last_terminated_reason{container="ruler-query-frontend", namespace="default", reason="OOMKilled"})
or vector(0)
)
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_memory_working_set_bytes{container="ruler-query-frontend",namespace="default"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
serverAddress: http://prometheus.default:9090/prometheus
threshold: "629145600"
name: ruler_query_frontend_memory_hpa_default
diff --git a/operations/mimir-tests/test-compactor-concurrent-rollout-generated.yaml b/operations/mimir-tests/test-compactor-concurrent-rollout-generated.yaml
new file mode 100644
index 00000000000..f235c116d76
--- /dev/null
+++ b/operations/mimir-tests/test-compactor-concurrent-rollout-generated.yaml
@@ -0,0 +1,1443 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: default
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ name: compactor
+ name: compactor
+ namespace: default
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ name: compactor
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ name: distributor
+ name: distributor
+ namespace: default
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ name: distributor
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ name: ingester
+ name: ingester
+ namespace: default
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ name: ingester
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ name: memcached
+ name: memcached
+ namespace: default
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ name: memcached
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ name: memcached-frontend
+ name: memcached-frontend
+ namespace: default
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ name: memcached-frontend
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ name: memcached-index-queries
+ name: memcached-index-queries
+ namespace: default
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ name: memcached-index-queries
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ name: memcached-metadata
+ name: memcached-metadata
+ namespace: default
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ name: memcached-metadata
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ name: querier
+ name: querier
+ namespace: default
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ name: querier
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ name: query-frontend
+ name: query-frontend
+ namespace: default
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ name: query-frontend
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ name: query-scheduler
+ name: query-scheduler
+ namespace: default
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ name: query-scheduler
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ name: rollout-operator
+ name: rollout-operator
+ namespace: default
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ name: rollout-operator
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ name: store-gateway
+ name: store-gateway
+ namespace: default
+spec:
+ maxUnavailable: 2
+ selector:
+ matchLabels:
+ name: store-gateway
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rollout-operator
+ namespace: default
+---
+apiVersion: v1
+data:
+ overrides.yaml: |
+ overrides: {}
+kind: ConfigMap
+metadata:
+ name: overrides
+ namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: rollout-operator-role
+ namespace: default
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - list
+ - get
+ - watch
+ - delete
+- apiGroups:
+ - apps
+ resources:
+ - statefulsets
+ verbs:
+ - list
+ - get
+ - watch
+ - patch
+- apiGroups:
+ - apps
+ resources:
+ - statefulsets/status
+ verbs:
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: rollout-operator-rolebinding
+ namespace: default
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rollout-operator-role
+subjects:
+- kind: ServiceAccount
+ name: rollout-operator
+ namespace: default
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: compactor
+ name: compactor
+ namespace: default
+spec:
+ clusterIP: None
+ ports:
+ - name: compactor-http-metrics
+ port: 8080
+ targetPort: 8080
+ - name: compactor-grpc
+ port: 9095
+ targetPort: 9095
+ - name: compactor-gossip-ring
+ port: 7946
+ targetPort: 7946
+ selector:
+ name: compactor
+ rollout-group: compactor
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: distributor
+ name: distributor
+ namespace: default
+spec:
+ clusterIP: None
+ ports:
+ - name: distributor-http-metrics
+ port: 8080
+ targetPort: 8080
+ - name: distributor-grpc
+ port: 9095
+ targetPort: 9095
+ - name: distributor-gossip-ring
+ port: 7946
+ targetPort: 7946
+ selector:
+ name: distributor
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: gossip-ring
+ namespace: default
+spec:
+ clusterIP: None
+ ports:
+ - appProtocol: tcp
+ name: gossip-ring
+ port: 7946
+ protocol: TCP
+ targetPort: 7946
+ selector:
+ gossip_ring_member: "true"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: ingester
+ name: ingester
+ namespace: default
+spec:
+ ports:
+ - name: ingester-http-metrics
+ port: 8080
+ targetPort: 8080
+ - name: ingester-grpc
+ port: 9095
+ targetPort: 9095
+ - name: ingester-gossip-ring
+ port: 7946
+ targetPort: 7946
+ selector:
+ name: ingester
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: memcached
+ name: memcached
+ namespace: default
+spec:
+ clusterIP: None
+ ports:
+ - name: memcached-client
+ port: 11211
+ targetPort: 11211
+ - name: exporter-http-metrics
+ port: 9150
+ targetPort: 9150
+ selector:
+ name: memcached
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: memcached-frontend
+ name: memcached-frontend
+ namespace: default
+spec:
+ clusterIP: None
+ ports:
+ - name: memcached-client
+ port: 11211
+ targetPort: 11211
+ - name: exporter-http-metrics
+ port: 9150
+ targetPort: 9150
+ selector:
+ name: memcached-frontend
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: memcached-index-queries
+ name: memcached-index-queries
+ namespace: default
+spec:
+ clusterIP: None
+ ports:
+ - name: memcached-client
+ port: 11211
+ targetPort: 11211
+ - name: exporter-http-metrics
+ port: 9150
+ targetPort: 9150
+ selector:
+ name: memcached-index-queries
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: memcached-metadata
+ name: memcached-metadata
+ namespace: default
+spec:
+ clusterIP: None
+ ports:
+ - name: memcached-client
+ port: 11211
+ targetPort: 11211
+ - name: exporter-http-metrics
+ port: 9150
+ targetPort: 9150
+ selector:
+ name: memcached-metadata
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: querier
+ name: querier
+ namespace: default
+spec:
+ ports:
+ - name: querier-http-metrics
+ port: 8080
+ targetPort: 8080
+ - name: querier-grpc
+ port: 9095
+ targetPort: 9095
+ - name: querier-gossip-ring
+ port: 7946
+ targetPort: 7946
+ selector:
+ name: querier
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: query-frontend
+ name: query-frontend
+ namespace: default
+spec:
+ ports:
+ - name: query-frontend-http-metrics
+ port: 8080
+ targetPort: 8080
+ - name: query-frontend-grpc
+ port: 9095
+ targetPort: 9095
+ selector:
+ name: query-frontend
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: query-scheduler
+ name: query-scheduler
+ namespace: default
+spec:
+ ports:
+ - name: query-scheduler-http-metrics
+ port: 8080
+ targetPort: 8080
+ - name: query-scheduler-grpc
+ port: 9095
+ targetPort: 9095
+ selector:
+ name: query-scheduler
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: query-scheduler
+ name: query-scheduler-discovery
+ namespace: default
+spec:
+ clusterIP: None
+ ports:
+ - name: query-scheduler-http-metrics
+ port: 8080
+ targetPort: 8080
+ - name: query-scheduler-grpc
+ port: 9095
+ targetPort: 9095
+ publishNotReadyAddresses: true
+ selector:
+ name: query-scheduler
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: store-gateway
+ name: store-gateway
+ namespace: default
+spec:
+ ports:
+ - name: store-gateway-http-metrics
+ port: 8080
+ targetPort: 8080
+ - name: store-gateway-grpc
+ port: 9095
+ targetPort: 9095
+ - name: store-gateway-gossip-ring
+ port: 7946
+ targetPort: 7946
+ selector:
+ name: store-gateway
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: distributor
+ namespace: default
+spec:
+ minReadySeconds: 10
+ replicas: 3
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ name: distributor
+ strategy:
+ rollingUpdate:
+ maxSurge: 15%
+ maxUnavailable: 0
+ template:
+ metadata:
+ labels:
+ gossip_ring_member: "true"
+ name: distributor
+ spec:
+ containers:
+ - args:
+ - -distributor.ha-tracker.enable=true
+ - -distributor.ha-tracker.enable-for-all-users=true
+ - -distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379
+ - -distributor.ha-tracker.prefix=prom_ha/
+ - -distributor.ha-tracker.store=etcd
+ - -distributor.health-check-ingesters=true
+ - -distributor.ingestion-burst-size=200000
+ - -distributor.ingestion-rate-limit=10000
+ - -distributor.ring.heartbeat-period=1m
+ - -distributor.ring.heartbeat-timeout=4m
+ - -distributor.ring.prefix=
+ - -distributor.ring.store=memberlist
+ - -ingester.ring.heartbeat-timeout=10m
+ - -ingester.ring.prefix=
+ - -ingester.ring.replication-factor=3
+ - -ingester.ring.store=memberlist
+ - -mem-ballast-size-bytes=1073741824
+ - -memberlist.bind-port=7946
+ - -memberlist.join=dns+gossip-ring.default.svc.cluster.local.:7946
+ - -runtime-config.file=/etc/mimir/overrides.yaml
+ - -server.grpc.keepalive.max-connection-age=60s
+ - -server.grpc.keepalive.max-connection-age-grace=5m
+ - -server.grpc.keepalive.max-connection-idle=1m
+ - -server.grpc.keepalive.min-time-between-pings=10s
+ - -server.grpc.keepalive.ping-without-stream-allowed=true
+ - -server.http-listen-port=8080
+ - -shutdown-delay=90s
+ - -target=distributor
+ - -usage-stats.installation-mode=jsonnet
+ env:
+ - name: GOMAXPROCS
+ value: "8"
+ - name: JAEGER_REPORTER_MAX_QUEUE_SIZE
+ value: "1000"
+ image: grafana/mimir:2.12.0
+ imagePullPolicy: IfNotPresent
+ name: distributor
+ ports:
+ - containerPort: 8080
+ name: http-metrics
+ - containerPort: 9095
+ name: grpc
+ - containerPort: 7946
+ name: gossip-ring
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: 8080
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ resources:
+ limits:
+ memory: 4Gi
+ requests:
+ cpu: "2"
+ memory: 2Gi
+ volumeMounts:
+ - mountPath: /etc/mimir
+ name: overrides
+ terminationGracePeriodSeconds: 100
+ topologySpreadConstraints:
+ - labelSelector:
+ matchLabels:
+ name: distributor
+ maxSkew: 1
+ topologyKey: kubernetes.io/hostname
+ whenUnsatisfiable: ScheduleAnyway
+ volumes:
+ - configMap:
+ name: overrides
+ name: overrides
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: querier
+ namespace: default
+spec:
+ minReadySeconds: 10
+ replicas: 6
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ name: querier
+ strategy:
+ rollingUpdate:
+ maxSurge: 15%
+ maxUnavailable: 0
+ template:
+ metadata:
+ labels:
+ gossip_ring_member: "true"
+ name: querier
+ spec:
+ containers:
+ - args:
+ - -blocks-storage.bucket-store.metadata-cache.backend=memcached
+ - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
+ - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
+ - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576
+ - -blocks-storage.bucket-store.sync-dir=/data/tsdb
+ - -blocks-storage.bucket-store.sync-interval=15m
+ - -blocks-storage.gcs.bucket-name=blocks-bucket
+ - -common.storage.backend=gcs
+ - -distributor.health-check-ingesters=true
+ - -ingester.ring.heartbeat-timeout=10m
+ - -ingester.ring.prefix=
+ - -ingester.ring.replication-factor=3
+ - -ingester.ring.store=memberlist
+ - -mem-ballast-size-bytes=268435456
+ - -memberlist.bind-port=7946
+ - -memberlist.join=dns+gossip-ring.default.svc.cluster.local.:7946
+ - -querier.frontend-client.grpc-max-send-msg-size=104857600
+ - -querier.max-concurrent=8
+ - -querier.max-partial-query-length=768h
+ - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local.:9095
+ - -runtime-config.file=/etc/mimir/overrides.yaml
+ - -server.grpc.keepalive.min-time-between-pings=10s
+ - -server.grpc.keepalive.ping-without-stream-allowed=true
+ - -server.http-listen-port=8080
+ - -store-gateway.sharding-ring.heartbeat-timeout=4m
+ - -store-gateway.sharding-ring.prefix=
+ - -store-gateway.sharding-ring.replication-factor=3
+ - -store-gateway.sharding-ring.store=memberlist
+ - -target=querier
+ - -usage-stats.installation-mode=jsonnet
+ env:
+ - name: GOMAXPROCS
+ value: "5"
+ - name: JAEGER_REPORTER_MAX_QUEUE_SIZE
+ value: "5000"
+ image: grafana/mimir:2.12.0
+ imagePullPolicy: IfNotPresent
+ name: querier
+ ports:
+ - containerPort: 8080
+ name: http-metrics
+ - containerPort: 9095
+ name: grpc
+ - containerPort: 7946
+ name: gossip-ring
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: 8080
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ resources:
+ limits:
+ memory: 24Gi
+ requests:
+ cpu: "1"
+ memory: 12Gi
+ volumeMounts:
+ - mountPath: /etc/mimir
+ name: overrides
+ topologySpreadConstraints:
+ - labelSelector:
+ matchLabels:
+ name: querier
+ maxSkew: 1
+ topologyKey: kubernetes.io/hostname
+ whenUnsatisfiable: ScheduleAnyway
+ volumes:
+ - configMap:
+ name: overrides
+ name: overrides
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: query-frontend
+ namespace: default
+spec:
+ minReadySeconds: 10
+ replicas: 2
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ name: query-frontend
+ strategy:
+ rollingUpdate:
+ maxSurge: 15%
+ maxUnavailable: 0
+ template:
+ metadata:
+ labels:
+ name: query-frontend
+ spec:
+ containers:
+ - args:
+ - -query-frontend.cache-results=true
+ - -query-frontend.max-cache-freshness=10m
+ - -query-frontend.max-total-query-length=12000h
+ - -query-frontend.query-sharding-target-series-per-shard=2500
+ - -query-frontend.results-cache.backend=memcached
+ - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local.:11211
+ - -query-frontend.results-cache.memcached.max-item-size=5242880
+ - -query-frontend.results-cache.memcached.timeout=500ms
+ - -query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local.:9095
+ - -runtime-config.file=/etc/mimir/overrides.yaml
+ - -server.grpc.keepalive.max-connection-age=30s
+ - -server.grpc.keepalive.min-time-between-pings=10s
+ - -server.grpc.keepalive.ping-without-stream-allowed=true
+ - -server.http-listen-port=8080
+ - -shutdown-delay=90s
+ - -target=query-frontend
+ - -usage-stats.installation-mode=jsonnet
+ env:
+ - name: JAEGER_REPORTER_MAX_QUEUE_SIZE
+ value: "5000"
+ image: grafana/mimir:2.12.0
+ imagePullPolicy: IfNotPresent
+ name: query-frontend
+ ports:
+ - containerPort: 8080
+ name: http-metrics
+ - containerPort: 9095
+ name: grpc
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: 8080
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ resources:
+ limits:
+ memory: 1200Mi
+ requests:
+ cpu: "2"
+ memory: 600Mi
+ volumeMounts:
+ - mountPath: /etc/mimir
+ name: overrides
+ terminationGracePeriodSeconds: 390
+ topologySpreadConstraints:
+ - labelSelector:
+ matchLabels:
+ name: query-frontend
+ maxSkew: 1
+ topologyKey: kubernetes.io/hostname
+ whenUnsatisfiable: ScheduleAnyway
+ volumes:
+ - configMap:
+ name: overrides
+ name: overrides
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: query-scheduler
+ namespace: default
+spec:
+ minReadySeconds: 10
+ replicas: 2
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ name: query-scheduler
+ strategy:
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 0
+ template:
+ metadata:
+ labels:
+ name: query-scheduler
+ spec:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ name: query-scheduler
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - args:
+ - -query-scheduler.max-outstanding-requests-per-tenant=100
+ - -server.grpc.keepalive.min-time-between-pings=10s
+ - -server.grpc.keepalive.ping-without-stream-allowed=true
+ - -server.http-listen-port=8080
+ - -target=query-scheduler
+ - -usage-stats.installation-mode=jsonnet
+ image: grafana/mimir:2.12.0
+ imagePullPolicy: IfNotPresent
+ name: query-scheduler
+ ports:
+ - containerPort: 8080
+ name: http-metrics
+ - containerPort: 9095
+ name: grpc
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: 8080
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ resources:
+ limits:
+ memory: 2Gi
+ requests:
+ cpu: "2"
+ memory: 1Gi
+ volumeMounts:
+ - mountPath: /etc/mimir
+ name: overrides
+ volumes:
+ - configMap:
+ name: overrides
+ name: overrides
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: rollout-operator
+ namespace: default
+spec:
+ minReadySeconds: 10
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ name: rollout-operator
+ strategy:
+ rollingUpdate:
+ maxSurge: 0
+ maxUnavailable: 1
+ template:
+ metadata:
+ labels:
+ name: rollout-operator
+ spec:
+ containers:
+ - args:
+ - -kubernetes.namespace=default
+ image: grafana/rollout-operator:v0.13.0
+ imagePullPolicy: IfNotPresent
+ name: rollout-operator
+ ports:
+ - containerPort: 8001
+ name: http-metrics
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: 8001
+ initialDelaySeconds: 5
+ timeoutSeconds: 1
+ resources:
+ limits:
+ memory: 200Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ serviceAccountName: rollout-operator
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ annotations:
+ rollout-max-unavailable: "8"
+ labels:
+ name: compactor
+ rollout-group: compactor
+ name: compactor
+ namespace: default
+spec:
+ podManagementPolicy: Parallel
+ replicas: 16
+ selector:
+ matchLabels:
+ name: compactor
+ rollout-group: compactor
+ serviceName: compactor
+ template:
+ metadata:
+ labels:
+ gossip_ring_member: "true"
+ name: compactor
+ rollout-group: compactor
+ spec:
+ containers:
+ - args:
+ - -blocks-storage.gcs.bucket-name=blocks-bucket
+ - -common.storage.backend=gcs
+ - -compactor.block-ranges=2h,12h,24h
+ - -compactor.blocks-retention-period=0
+ - -compactor.cleanup-interval=15m
+ - -compactor.compaction-concurrency=1
+ - -compactor.compaction-interval=30m
+ - -compactor.compactor-tenant-shard-size=1
+ - -compactor.data-dir=/data
+ - -compactor.deletion-delay=2h
+ - -compactor.first-level-compaction-wait-period=25m
+ - -compactor.max-closing-blocks-concurrency=2
+ - -compactor.max-opening-blocks-concurrency=4
+ - -compactor.ring.heartbeat-period=1m
+ - -compactor.ring.heartbeat-timeout=4m
+ - -compactor.ring.prefix=
+ - -compactor.ring.store=memberlist
+ - -compactor.ring.wait-stability-min-duration=1m
+ - -compactor.split-and-merge-shards=0
+ - -compactor.split-groups=1
+ - -compactor.symbols-flushers-concurrency=4
+ - -memberlist.bind-port=7946
+ - -memberlist.join=dns+gossip-ring.default.svc.cluster.local.:7946
+ - -runtime-config.file=/etc/mimir/overrides.yaml
+ - -server.grpc.keepalive.min-time-between-pings=10s
+ - -server.grpc.keepalive.ping-without-stream-allowed=true
+ - -server.http-listen-port=8080
+ - -target=compactor
+ - -usage-stats.installation-mode=jsonnet
+ image: grafana/mimir:2.12.0
+ imagePullPolicy: IfNotPresent
+ name: compactor
+ ports:
+ - containerPort: 8080
+ name: http-metrics
+ - containerPort: 9095
+ name: grpc
+ - containerPort: 7946
+ name: gossip-ring
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: 8080
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ resources:
+ limits:
+ memory: 6Gi
+ requests:
+ cpu: 1
+ memory: 6Gi
+ volumeMounts:
+ - mountPath: /data
+ name: compactor-data
+ - mountPath: /etc/mimir
+ name: overrides
+ securityContext:
+ runAsUser: 0
+ terminationGracePeriodSeconds: 900
+ volumes:
+ - configMap:
+ name: overrides
+ name: overrides
+ updateStrategy:
+ type: OnDelete
+ volumeClaimTemplates:
+ - apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: compactor-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 250Gi
+ storageClassName: standard
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ name: ingester
+ name: ingester
+ namespace: default
+spec:
+ podManagementPolicy: Parallel
+ replicas: 3
+ selector:
+ matchLabels:
+ name: ingester
+ serviceName: ingester
+ template:
+ metadata:
+ labels:
+ gossip_ring_member: "true"
+ name: ingester
+ spec:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ name: ingester
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - args:
+ - -blocks-storage.gcs.bucket-name=blocks-bucket
+ - -blocks-storage.tsdb.block-ranges-period=2h
+ - -blocks-storage.tsdb.dir=/data/tsdb
+ - -blocks-storage.tsdb.head-compaction-interval=15m
+ - -blocks-storage.tsdb.ship-interval=1m
+ - -blocks-storage.tsdb.wal-replay-concurrency=3
+ - -common.storage.backend=gcs
+ - -distributor.health-check-ingesters=true
+ - -ingester.max-global-metadata-per-metric=10
+ - -ingester.max-global-metadata-per-user=30000
+ - -ingester.max-global-series-per-user=150000
+ - -ingester.ring.heartbeat-period=2m
+ - -ingester.ring.heartbeat-timeout=10m
+ - -ingester.ring.num-tokens=512
+ - -ingester.ring.prefix=
+ - -ingester.ring.replication-factor=3
+ - -ingester.ring.store=memberlist
+ - -ingester.ring.tokens-file-path=/data/tokens
+ - -ingester.ring.unregister-on-shutdown=true
+ - -memberlist.bind-port=7946
+ - -memberlist.join=dns+gossip-ring.default.svc.cluster.local.:7946
+ - -runtime-config.file=/etc/mimir/overrides.yaml
+ - -server.grpc-max-concurrent-streams=500
+ - -server.grpc.keepalive.min-time-between-pings=10s
+ - -server.grpc.keepalive.ping-without-stream-allowed=true
+ - -server.http-listen-port=8080
+ - -target=ingester
+ - -usage-stats.installation-mode=jsonnet
+ env:
+ - name: JAEGER_REPORTER_MAX_QUEUE_SIZE
+ value: "1000"
+ image: grafana/mimir:2.12.0
+ imagePullPolicy: IfNotPresent
+ name: ingester
+ ports:
+ - containerPort: 8080
+ name: http-metrics
+ - containerPort: 9095
+ name: grpc
+ - containerPort: 7946
+ name: gossip-ring
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: 8080
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ resources:
+ limits:
+ memory: 25Gi
+ requests:
+ cpu: "4"
+ memory: 15Gi
+ volumeMounts:
+ - mountPath: /data
+ name: ingester-data
+ - mountPath: /etc/mimir
+ name: overrides
+ securityContext:
+ runAsUser: 0
+ terminationGracePeriodSeconds: 1200
+ volumes:
+ - configMap:
+ name: overrides
+ name: overrides
+ updateStrategy:
+ type: RollingUpdate
+ volumeClaimTemplates:
+ - apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: ingester-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 100Gi
+ storageClassName: fast
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: memcached
+ namespace: default
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ name: memcached
+ serviceName: memcached
+ template:
+ metadata:
+ labels:
+ name: memcached
+ spec:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ name: memcached
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - args:
+ - -m 6144
+ - -I 1m
+ - -c 16384
+ - -v
+ - --extended=track_sizes
+ image: memcached:1.6.22-alpine
+ imagePullPolicy: IfNotPresent
+ name: memcached
+ ports:
+ - containerPort: 11211
+ name: client
+ resources:
+ limits:
+ memory: 9Gi
+ requests:
+ cpu: 500m
+ memory: 6552Mi
+ - args:
+ - --memcached.address=localhost:11211
+ - --web.listen-address=0.0.0.0:9150
+ image: prom/memcached-exporter:v0.14.3
+ imagePullPolicy: IfNotPresent
+ name: exporter
+ ports:
+ - containerPort: 9150
+ name: http-metrics
+ updateStrategy:
+ type: RollingUpdate
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: memcached-frontend
+ namespace: default
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ name: memcached-frontend
+ serviceName: memcached-frontend
+ template:
+ metadata:
+ labels:
+ name: memcached-frontend
+ spec:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ name: memcached-frontend
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - args:
+ - -m 1024
+ - -I 5m
+ - -c 16384
+ - -v
+ - --extended=track_sizes
+ image: memcached:1.6.22-alpine
+ imagePullPolicy: IfNotPresent
+ name: memcached
+ ports:
+ - containerPort: 11211
+ name: client
+ resources:
+ limits:
+ memory: 1536Mi
+ requests:
+ cpu: 500m
+ memory: 1176Mi
+ - args:
+ - --memcached.address=localhost:11211
+ - --web.listen-address=0.0.0.0:9150
+ image: prom/memcached-exporter:v0.14.3
+ imagePullPolicy: IfNotPresent
+ name: exporter
+ ports:
+ - containerPort: 9150
+ name: http-metrics
+ updateStrategy:
+ type: RollingUpdate
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: memcached-index-queries
+ namespace: default
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ name: memcached-index-queries
+ serviceName: memcached-index-queries
+ template:
+ metadata:
+ labels:
+ name: memcached-index-queries
+ spec:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ name: memcached-index-queries
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - args:
+ - -m 1024
+ - -I 5m
+ - -c 16384
+ - -v
+ - --extended=track_sizes
+ image: memcached:1.6.22-alpine
+ imagePullPolicy: IfNotPresent
+ name: memcached
+ ports:
+ - containerPort: 11211
+ name: client
+ resources:
+ limits:
+ memory: 1536Mi
+ requests:
+ cpu: 500m
+ memory: 1176Mi
+ - args:
+ - --memcached.address=localhost:11211
+ - --web.listen-address=0.0.0.0:9150
+ image: prom/memcached-exporter:v0.14.3
+ imagePullPolicy: IfNotPresent
+ name: exporter
+ ports:
+ - containerPort: 9150
+ name: http-metrics
+ updateStrategy:
+ type: RollingUpdate
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: memcached-metadata
+ namespace: default
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ name: memcached-metadata
+ serviceName: memcached-metadata
+ template:
+ metadata:
+ labels:
+ name: memcached-metadata
+ spec:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ name: memcached-metadata
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - args:
+ - -m 512
+ - -I 1m
+ - -c 16384
+ - -v
+ - --extended=track_sizes
+ image: memcached:1.6.22-alpine
+ imagePullPolicy: IfNotPresent
+ name: memcached
+ ports:
+ - containerPort: 11211
+ name: client
+ resources:
+ limits:
+ memory: 768Mi
+ requests:
+ cpu: 500m
+ memory: 638Mi
+ - args:
+ - --memcached.address=localhost:11211
+ - --web.listen-address=0.0.0.0:9150
+ image: prom/memcached-exporter:v0.14.3
+ imagePullPolicy: IfNotPresent
+ name: exporter
+ ports:
+ - containerPort: 9150
+ name: http-metrics
+ updateStrategy:
+ type: RollingUpdate
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ name: store-gateway
+ name: store-gateway
+ namespace: default
+spec:
+ podManagementPolicy: Parallel
+ replicas: 3
+ selector:
+ matchLabels:
+ name: store-gateway
+ serviceName: store-gateway
+ template:
+ metadata:
+ labels:
+ gossip_ring_member: "true"
+ name: store-gateway
+ spec:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ name: store-gateway
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - args:
+ - -blocks-storage.bucket-store.chunks-cache.backend=memcached
+ - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local.:11211
+ - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50
+ - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
+ - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
+ - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
+ - -blocks-storage.bucket-store.index-cache.backend=memcached
+ - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
+ - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
+ - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
+ - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
+ - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
+ - -blocks-storage.bucket-store.metadata-cache.backend=memcached
+ - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
+ - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
+ - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100
+ - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=150
+ - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576
+ - -blocks-storage.bucket-store.sync-dir=/data/tsdb
+ - -blocks-storage.bucket-store.sync-interval=15m
+ - -blocks-storage.gcs.bucket-name=blocks-bucket
+ - -common.storage.backend=gcs
+ - -memberlist.bind-port=7946
+ - -memberlist.join=dns+gossip-ring.default.svc.cluster.local.:7946
+ - -runtime-config.file=/etc/mimir/overrides.yaml
+ - -server.grpc.keepalive.min-time-between-pings=10s
+ - -server.grpc.keepalive.ping-without-stream-allowed=true
+ - -server.http-listen-port=8080
+ - -store-gateway.sharding-ring.heartbeat-period=1m
+ - -store-gateway.sharding-ring.heartbeat-timeout=4m
+ - -store-gateway.sharding-ring.prefix=
+ - -store-gateway.sharding-ring.replication-factor=3
+ - -store-gateway.sharding-ring.store=memberlist
+ - -store-gateway.sharding-ring.tokens-file-path=/data/tokens
+ - -store-gateway.sharding-ring.unregister-on-shutdown=false
+ - -store-gateway.sharding-ring.wait-stability-min-duration=1m
+ - -target=store-gateway
+ - -usage-stats.installation-mode=jsonnet
+ env:
+ - name: GOMAXPROCS
+ value: "5"
+ - name: GOMEMLIMIT
+ value: "12884901888"
+ - name: JAEGER_REPORTER_MAX_QUEUE_SIZE
+ value: "1000"
+ image: grafana/mimir:2.12.0
+ imagePullPolicy: IfNotPresent
+ name: store-gateway
+ ports:
+ - containerPort: 8080
+ name: http-metrics
+ - containerPort: 9095
+ name: grpc
+ - containerPort: 7946
+ name: gossip-ring
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: 8080
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ resources:
+ limits:
+ memory: 18Gi
+ requests:
+ cpu: "1"
+ memory: 12Gi
+ volumeMounts:
+ - mountPath: /data
+ name: store-gateway-data
+ - mountPath: /etc/mimir
+ name: overrides
+ securityContext:
+ runAsUser: 0
+ terminationGracePeriodSeconds: 120
+ volumes:
+ - configMap:
+ name: overrides
+ name: overrides
+ updateStrategy:
+ type: RollingUpdate
+ volumeClaimTemplates:
+ - apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: store-gateway-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 50Gi
+ storageClassName: standard
+---
+apiVersion: etcd.database.coreos.com/v1beta2
+kind: EtcdCluster
+metadata:
+ annotations:
+ etcd.database.coreos.com/scope: clusterwide
+ name: etcd
+ namespace: default
+spec:
+ pod:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ etcd_cluster: etcd
+ topologyKey: kubernetes.io/hostname
+ annotations:
+ prometheus.io/port: "2379"
+ prometheus.io/scrape: "true"
+ etcdEnv:
+ - name: ETCD_AUTO_COMPACTION_RETENTION
+ value: 1h
+ labels:
+ name: etcd
+ resources:
+ limits:
+ memory: 512Mi
+ requests:
+ cpu: 500m
+ memory: 512Mi
+ size: 3
+ version: 3.3.13
diff --git a/operations/mimir-tests/test-compactor-concurrent-rollout.jsonnet b/operations/mimir-tests/test-compactor-concurrent-rollout.jsonnet
new file mode 100644
index 00000000000..2b40f8cec0b
--- /dev/null
+++ b/operations/mimir-tests/test-compactor-concurrent-rollout.jsonnet
@@ -0,0 +1,16 @@
+local mimir = import 'mimir/mimir.libsonnet';
+
+mimir {
+ _config+:: {
+ namespace: 'default',
+ external_url: 'http://test',
+
+ storage_backend: 'gcs',
+ blocks_storage_bucket_name: 'blocks-bucket',
+
+ cortex_compactor_concurrent_rollout_enabled: true,
+ },
+
+ compactor_statefulset+:
+ $.apps.v1.statefulSet.mixin.spec.withReplicas(16),
+}
diff --git a/operations/mimir-tests/test-consul-generated.yaml b/operations/mimir-tests/test-consul-generated.yaml
index e30515568b8..753c1a8f3df 100644
--- a/operations/mimir-tests/test-consul-generated.yaml
+++ b/operations/mimir-tests/test-consul-generated.yaml
@@ -922,7 +922,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -1016,7 +1016,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -1094,7 +1094,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -1166,7 +1166,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1258,7 +1258,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1333,7 +1333,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1428,7 +1428,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1533,7 +1533,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1625,7 +1625,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1679,7 +1679,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1733,7 +1733,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1787,7 +1787,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1830,14 +1830,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1870,7 +1870,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-consul-multi-zone-generated.yaml b/operations/mimir-tests/test-consul-multi-zone-generated.yaml
index 1b89bb58e19..030d54eb780 100644
--- a/operations/mimir-tests/test-consul-multi-zone-generated.yaml
+++ b/operations/mimir-tests/test-consul-multi-zone-generated.yaml
@@ -1087,7 +1087,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -1183,7 +1183,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -1261,7 +1261,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -1333,7 +1333,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1471,7 +1471,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1546,7 +1546,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1641,7 +1641,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1759,7 +1759,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1877,7 +1877,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1995,7 +1995,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -2087,7 +2087,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -2141,7 +2141,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -2195,7 +2195,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -2249,7 +2249,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -2303,14 +2303,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2345,7 +2345,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -2437,14 +2437,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2479,7 +2479,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -2571,14 +2571,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2613,7 +2613,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-consul-ruler-disabled-generated.yaml b/operations/mimir-tests/test-consul-ruler-disabled-generated.yaml
index fd9e0b1cd7b..cfa803c4b8c 100644
--- a/operations/mimir-tests/test-consul-ruler-disabled-generated.yaml
+++ b/operations/mimir-tests/test-consul-ruler-disabled-generated.yaml
@@ -891,7 +891,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -985,7 +985,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -1063,7 +1063,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -1135,7 +1135,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1201,7 +1201,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1296,7 +1296,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1401,7 +1401,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1493,7 +1493,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1547,7 +1547,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1601,7 +1601,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1655,7 +1655,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1698,14 +1698,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1738,7 +1738,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-continuous-test-generated.yaml b/operations/mimir-tests/test-continuous-test-generated.yaml
index 9036229ac1c..843b668bdce 100644
--- a/operations/mimir-tests/test-continuous-test-generated.yaml
+++ b/operations/mimir-tests/test-continuous-test-generated.yaml
@@ -451,7 +451,7 @@ spec:
- -tests.write-endpoint=http://distributor.default.svc.cluster.local
- -tests.write-read-series-test.max-query-age=48h
- -tests.write-read-series-test.num-series=1000
- image: grafana/mimir-continuous-test:2.11.0
+ image: grafana/mimir-continuous-test:2.12.0
imagePullPolicy: IfNotPresent
name: continuous-test
ports:
@@ -522,7 +522,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -619,7 +619,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -699,7 +699,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -771,7 +771,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -850,7 +850,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -959,7 +959,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1053,7 +1053,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1107,7 +1107,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1161,7 +1161,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1215,7 +1215,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1259,14 +1259,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1300,7 +1300,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-defaults-generated.yaml b/operations/mimir-tests/test-defaults-generated.yaml
index 7d899fb04cb..9545ba73dd6 100644
--- a/operations/mimir-tests/test-defaults-generated.yaml
+++ b/operations/mimir-tests/test-defaults-generated.yaml
@@ -468,7 +468,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -565,7 +565,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -645,7 +645,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -717,7 +717,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -796,7 +796,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -905,7 +905,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -999,7 +999,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1053,7 +1053,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1107,7 +1107,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1161,7 +1161,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1205,14 +1205,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1246,7 +1246,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-deployment-mode-migration-generated.yaml b/operations/mimir-tests/test-deployment-mode-migration-generated.yaml
index 176c7f351ec..4fc285d1203 100644
--- a/operations/mimir-tests/test-deployment-mode-migration-generated.yaml
+++ b/operations/mimir-tests/test-deployment-mode-migration-generated.yaml
@@ -983,7 +983,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -1095,7 +1095,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-read
ports:
@@ -1196,7 +1196,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -1281,7 +1281,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -1361,7 +1361,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1501,7 +1501,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1578,7 +1578,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1677,7 +1677,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1799,7 +1799,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1921,7 +1921,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -2043,7 +2043,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -2137,7 +2137,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -2191,7 +2191,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -2245,7 +2245,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -2299,7 +2299,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -2359,14 +2359,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2450,7 +2450,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-backend
ports:
@@ -2550,14 +2550,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2641,7 +2641,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-backend
ports:
@@ -2741,14 +2741,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2832,7 +2832,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-backend
ports:
@@ -2969,7 +2969,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-write
ports:
@@ -3106,7 +3106,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-write
ports:
@@ -3243,7 +3243,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-write
ports:
@@ -3338,14 +3338,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -3381,7 +3381,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -3476,14 +3476,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -3519,7 +3519,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -3614,14 +3614,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -3657,7 +3657,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-env-vars-generated.yaml b/operations/mimir-tests/test-env-vars-generated.yaml
index 7527a381d74..d21c0805bd6 100644
--- a/operations/mimir-tests/test-env-vars-generated.yaml
+++ b/operations/mimir-tests/test-env-vars-generated.yaml
@@ -534,7 +534,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -631,7 +631,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -711,7 +711,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -783,7 +783,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -875,7 +875,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -952,7 +952,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1051,7 +1051,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1164,7 +1164,7 @@ spec:
value: 1Gi
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1258,7 +1258,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1312,7 +1312,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1366,7 +1366,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1420,7 +1420,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1464,14 +1464,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1507,7 +1507,7 @@ spec:
value: 2Gi
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-etcd-replicas-generated.yaml b/operations/mimir-tests/test-etcd-replicas-generated.yaml
index ee6429cd5f2..667c0653923 100644
--- a/operations/mimir-tests/test-etcd-replicas-generated.yaml
+++ b/operations/mimir-tests/test-etcd-replicas-generated.yaml
@@ -468,7 +468,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -565,7 +565,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -645,7 +645,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -717,7 +717,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -796,7 +796,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -905,7 +905,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -999,7 +999,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1053,7 +1053,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1107,7 +1107,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1161,7 +1161,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1205,14 +1205,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1246,7 +1246,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-extra-runtime-config-generated.yaml b/operations/mimir-tests/test-extra-runtime-config-generated.yaml
index 63a807f1b30..3c224fbf20f 100644
--- a/operations/mimir-tests/test-extra-runtime-config-generated.yaml
+++ b/operations/mimir-tests/test-extra-runtime-config-generated.yaml
@@ -542,7 +542,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -644,7 +644,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -729,7 +729,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -806,7 +806,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -903,7 +903,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -985,7 +985,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1089,7 +1089,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1203,7 +1203,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1302,7 +1302,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1356,7 +1356,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1410,7 +1410,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1464,7 +1464,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1508,14 +1508,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1549,7 +1549,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-helm-parity-generated.yaml b/operations/mimir-tests/test-helm-parity-generated.yaml
index b1fa20abded..625624ce2bf 100644
--- a/operations/mimir-tests/test-helm-parity-generated.yaml
+++ b/operations/mimir-tests/test-helm-parity-generated.yaml
@@ -574,7 +574,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -648,7 +648,7 @@ spec:
- -runtime-config.file=/etc/mimir/overrides.yaml
- -server.http-listen-port=8080
- -target=overrides-exporter
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: overrides-exporter
ports:
@@ -730,7 +730,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -813,7 +813,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -885,7 +885,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -976,7 +976,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1054,7 +1054,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1158,7 +1158,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1267,7 +1267,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1361,7 +1361,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1415,7 +1415,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1469,7 +1469,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1523,7 +1523,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1567,14 +1567,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1608,7 +1608,7 @@ spec:
value: "536870912"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-memberlist-cluster-label-migration-step-0-before-generated.yaml b/operations/mimir-tests/test-memberlist-cluster-label-migration-step-0-before-generated.yaml
index ca450b33c80..61d6c51a63d 100644
--- a/operations/mimir-tests/test-memberlist-cluster-label-migration-step-0-before-generated.yaml
+++ b/operations/mimir-tests/test-memberlist-cluster-label-migration-step-0-before-generated.yaml
@@ -534,7 +534,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -631,7 +631,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -711,7 +711,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -783,7 +783,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -875,7 +875,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -952,7 +952,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1051,7 +1051,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1160,7 +1160,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1254,7 +1254,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1308,7 +1308,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1362,7 +1362,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1416,7 +1416,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1460,14 +1460,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1501,7 +1501,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-memberlist-cluster-label-migration-step-1-generated.yaml b/operations/mimir-tests/test-memberlist-cluster-label-migration-step-1-generated.yaml
index 038b4668db1..fb57d4ab820 100644
--- a/operations/mimir-tests/test-memberlist-cluster-label-migration-step-1-generated.yaml
+++ b/operations/mimir-tests/test-memberlist-cluster-label-migration-step-1-generated.yaml
@@ -535,7 +535,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -633,7 +633,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -713,7 +713,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -785,7 +785,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -878,7 +878,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -956,7 +956,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1056,7 +1056,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1166,7 +1166,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1260,7 +1260,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1314,7 +1314,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1368,7 +1368,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1422,7 +1422,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1466,14 +1466,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1508,7 +1508,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-memberlist-cluster-label-migration-step-2-generated.yaml b/operations/mimir-tests/test-memberlist-cluster-label-migration-step-2-generated.yaml
index d84f0aaedc9..5d70dfaf6f6 100644
--- a/operations/mimir-tests/test-memberlist-cluster-label-migration-step-2-generated.yaml
+++ b/operations/mimir-tests/test-memberlist-cluster-label-migration-step-2-generated.yaml
@@ -536,7 +536,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -635,7 +635,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -715,7 +715,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -787,7 +787,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -881,7 +881,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -960,7 +960,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1061,7 +1061,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1172,7 +1172,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1266,7 +1266,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1320,7 +1320,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1374,7 +1374,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1428,7 +1428,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1472,14 +1472,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1515,7 +1515,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-memberlist-cluster-label-migration-step-3-generated.yaml b/operations/mimir-tests/test-memberlist-cluster-label-migration-step-3-generated.yaml
index 6de17689495..1a6cc4d3a4f 100644
--- a/operations/mimir-tests/test-memberlist-cluster-label-migration-step-3-generated.yaml
+++ b/operations/mimir-tests/test-memberlist-cluster-label-migration-step-3-generated.yaml
@@ -535,7 +535,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -633,7 +633,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -713,7 +713,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -785,7 +785,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -878,7 +878,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -956,7 +956,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1056,7 +1056,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1166,7 +1166,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1260,7 +1260,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1314,7 +1314,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1368,7 +1368,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1422,7 +1422,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1466,14 +1466,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1508,7 +1508,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-memberlist-migration-step-0-before-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-0-before-generated.yaml
index e30515568b8..753c1a8f3df 100644
--- a/operations/mimir-tests/test-memberlist-migration-step-0-before-generated.yaml
+++ b/operations/mimir-tests/test-memberlist-migration-step-0-before-generated.yaml
@@ -922,7 +922,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -1016,7 +1016,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -1094,7 +1094,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -1166,7 +1166,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1258,7 +1258,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1333,7 +1333,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1428,7 +1428,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1533,7 +1533,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1625,7 +1625,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1679,7 +1679,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1733,7 +1733,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1787,7 +1787,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1830,14 +1830,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1870,7 +1870,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-memberlist-migration-step-1-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-1-generated.yaml
index b3d5cd18139..e1425f1120e 100644
--- a/operations/mimir-tests/test-memberlist-migration-step-1-generated.yaml
+++ b/operations/mimir-tests/test-memberlist-migration-step-1-generated.yaml
@@ -966,7 +966,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -1069,7 +1069,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -1149,7 +1149,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -1221,7 +1221,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1322,7 +1322,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1402,7 +1402,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1504,7 +1504,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1616,7 +1616,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1710,7 +1710,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1764,7 +1764,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1818,7 +1818,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1872,7 +1872,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1916,14 +1916,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1960,7 +1960,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml
index eea9e8fbf10..71b8ebce796 100644
--- a/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml
+++ b/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml
@@ -966,7 +966,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -1069,7 +1069,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -1149,7 +1149,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -1221,7 +1221,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1322,7 +1322,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1402,7 +1402,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1504,7 +1504,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1616,7 +1616,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1710,7 +1710,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1764,7 +1764,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1818,7 +1818,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1872,7 +1872,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1916,14 +1916,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1960,7 +1960,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml
index 76633778237..e4aa7e9916a 100644
--- a/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml
+++ b/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml
@@ -966,7 +966,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -1069,7 +1069,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -1149,7 +1149,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -1221,7 +1221,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1322,7 +1322,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1402,7 +1402,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1504,7 +1504,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1616,7 +1616,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1710,7 +1710,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1764,7 +1764,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1818,7 +1818,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1872,7 +1872,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1916,14 +1916,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1960,7 +1960,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-memberlist-migration-step-4-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-4-generated.yaml
index 080c632747a..336d9c8c184 100644
--- a/operations/mimir-tests/test-memberlist-migration-step-4-generated.yaml
+++ b/operations/mimir-tests/test-memberlist-migration-step-4-generated.yaml
@@ -966,7 +966,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -1069,7 +1069,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -1149,7 +1149,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -1221,7 +1221,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1322,7 +1322,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1402,7 +1402,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1504,7 +1504,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1616,7 +1616,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1710,7 +1710,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1764,7 +1764,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1818,7 +1818,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1872,7 +1872,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1916,14 +1916,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1960,7 +1960,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-memberlist-migration-step-5-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-5-generated.yaml
index 5f72395f81a..d2e7f3a2611 100644
--- a/operations/mimir-tests/test-memberlist-migration-step-5-generated.yaml
+++ b/operations/mimir-tests/test-memberlist-migration-step-5-generated.yaml
@@ -537,7 +537,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -634,7 +634,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -714,7 +714,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -786,7 +786,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -878,7 +878,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -955,7 +955,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1054,7 +1054,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1163,7 +1163,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1257,7 +1257,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1311,7 +1311,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1365,7 +1365,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1419,7 +1419,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1463,14 +1463,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1504,7 +1504,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml
index ca450b33c80..61d6c51a63d 100644
--- a/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml
+++ b/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml
@@ -534,7 +534,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -631,7 +631,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -711,7 +711,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -783,7 +783,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -875,7 +875,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -952,7 +952,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1051,7 +1051,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1160,7 +1160,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1254,7 +1254,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1308,7 +1308,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1362,7 +1362,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1416,7 +1416,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1460,14 +1460,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1501,7 +1501,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-memcached-mtls-generated.yaml b/operations/mimir-tests/test-memcached-mtls-generated.yaml
index b4c6d3687bb..1c50344ad3c 100644
--- a/operations/mimir-tests/test-memcached-mtls-generated.yaml
+++ b/operations/mimir-tests/test-memcached-mtls-generated.yaml
@@ -534,7 +534,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -637,7 +637,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -741,7 +741,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -831,7 +831,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -935,7 +935,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1012,7 +1012,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1111,7 +1111,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1220,7 +1220,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1327,7 +1327,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1404,7 +1404,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1481,7 +1481,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1558,7 +1558,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1613,7 +1613,7 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.chunks-cache.memcached.tls-ca-path=/var/secrets/memcached-ca-cert/memcached-ca-cert.pem
- -blocks-storage.bucket-store.chunks-cache.memcached.tls-cert-path=/var/secrets/memcached-client-cert/memcached-client-cert.pem
- -blocks-storage.bucket-store.chunks-cache.memcached.tls-enabled=true
@@ -1626,7 +1626,7 @@ spec:
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.memcached.tls-ca-path=/var/secrets/memcached-ca-cert/memcached-ca-cert.pem
- -blocks-storage.bucket-store.index-cache.memcached.tls-cert-path=/var/secrets/memcached-client-cert/memcached-client-cert.pem
- -blocks-storage.bucket-store.index-cache.memcached.tls-enabled=true
@@ -1671,7 +1671,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-multi-zone-generated.yaml b/operations/mimir-tests/test-multi-zone-generated.yaml
index 04c6d813f2e..31ab3f87b1f 100644
--- a/operations/mimir-tests/test-multi-zone-generated.yaml
+++ b/operations/mimir-tests/test-multi-zone-generated.yaml
@@ -711,7 +711,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -810,7 +810,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -890,7 +890,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -962,7 +962,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1100,7 +1100,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1177,7 +1177,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1276,7 +1276,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1406,7 +1406,7 @@ spec:
value: "1000"
- name: Z
value: "123"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1530,7 +1530,7 @@ spec:
value: all-ingesters
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1654,7 +1654,7 @@ spec:
value: all-ingesters
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1748,7 +1748,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1802,7 +1802,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1856,7 +1856,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1910,7 +1910,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1965,14 +1965,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2010,7 +2010,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -2105,14 +2105,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2152,7 +2152,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -2247,14 +2247,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2292,7 +2292,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml b/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml
index 96f80d62904..30c62f5c55c 100644
--- a/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml
+++ b/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml
@@ -779,7 +779,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -878,7 +878,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -958,7 +958,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -1030,7 +1030,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1168,7 +1168,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1245,7 +1245,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1344,7 +1344,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1453,7 +1453,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1575,7 +1575,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1697,7 +1697,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1819,7 +1819,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1913,7 +1913,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1967,7 +1967,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -2021,7 +2021,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -2075,7 +2075,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -2119,14 +2119,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2160,7 +2160,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -2255,14 +2255,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2298,7 +2298,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -2393,14 +2393,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2436,7 +2436,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -2531,14 +2531,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2574,7 +2574,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-multi-zone-with-store-gateway-automated-downscaling-generated.yaml b/operations/mimir-tests/test-multi-zone-with-store-gateway-automated-downscaling-generated.yaml
index c85fc71c8ed..64e56790adf 100644
--- a/operations/mimir-tests/test-multi-zone-with-store-gateway-automated-downscaling-generated.yaml
+++ b/operations/mimir-tests/test-multi-zone-with-store-gateway-automated-downscaling-generated.yaml
@@ -711,7 +711,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -810,7 +810,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -890,7 +890,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -962,7 +962,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1100,7 +1100,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1177,7 +1177,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1276,7 +1276,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1406,7 +1406,7 @@ spec:
value: "1000"
- name: Z
value: "123"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1530,7 +1530,7 @@ spec:
value: all-ingesters
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1654,7 +1654,7 @@ spec:
value: all-ingesters
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1748,7 +1748,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1802,7 +1802,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1856,7 +1856,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1910,7 +1910,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1969,14 +1969,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2015,7 +2015,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -2114,14 +2114,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2162,7 +2162,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
@@ -2261,14 +2261,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -2307,7 +2307,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-node-selector-and-affinity-generated.yaml b/operations/mimir-tests/test-node-selector-and-affinity-generated.yaml
index 6dec33dad16..ff15bf5d987 100644
--- a/operations/mimir-tests/test-node-selector-and-affinity-generated.yaml
+++ b/operations/mimir-tests/test-node-selector-and-affinity-generated.yaml
@@ -482,7 +482,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -595,7 +595,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -691,7 +691,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -778,7 +778,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -873,7 +873,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -997,7 +997,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1093,7 +1093,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1149,7 +1149,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1205,7 +1205,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1261,7 +1261,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1320,14 +1320,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1361,7 +1361,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-query-scheduler-consul-ring-generated.yaml b/operations/mimir-tests/test-query-scheduler-consul-ring-generated.yaml
index 1ac8bcf4cd6..f5366b586c2 100644
--- a/operations/mimir-tests/test-query-scheduler-consul-ring-generated.yaml
+++ b/operations/mimir-tests/test-query-scheduler-consul-ring-generated.yaml
@@ -922,7 +922,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -1019,7 +1019,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -1100,7 +1100,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -1176,7 +1176,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1268,7 +1268,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1343,7 +1343,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1438,7 +1438,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1543,7 +1543,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1635,7 +1635,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1689,7 +1689,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1743,7 +1743,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1797,7 +1797,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1840,14 +1840,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1880,7 +1880,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-query-scheduler-memberlist-ring-and-ruler-remote-evaluation-generated.yaml b/operations/mimir-tests/test-query-scheduler-memberlist-ring-and-ruler-remote-evaluation-generated.yaml
index 8951e33af2d..fd779fac009 100644
--- a/operations/mimir-tests/test-query-scheduler-memberlist-ring-and-ruler-remote-evaluation-generated.yaml
+++ b/operations/mimir-tests/test-query-scheduler-memberlist-ring-and-ruler-remote-evaluation-generated.yaml
@@ -669,7 +669,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -769,7 +769,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -854,7 +854,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -934,7 +934,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -1031,7 +1031,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1128,7 +1128,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-querier
ports:
@@ -1216,7 +1216,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-query-frontend
ports:
@@ -1296,7 +1296,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-query-scheduler
ports:
@@ -1367,7 +1367,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1467,7 +1467,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1577,7 +1577,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1671,7 +1671,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1725,7 +1725,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1779,7 +1779,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1833,7 +1833,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1877,14 +1877,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1919,7 +1919,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-query-scheduler-memberlist-ring-generated.yaml b/operations/mimir-tests/test-query-scheduler-memberlist-ring-generated.yaml
index 9d3d5c472b0..316909017f5 100644
--- a/operations/mimir-tests/test-query-scheduler-memberlist-ring-generated.yaml
+++ b/operations/mimir-tests/test-query-scheduler-memberlist-ring-generated.yaml
@@ -543,7 +543,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -643,7 +643,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -728,7 +728,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -808,7 +808,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -903,7 +903,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -981,7 +981,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1081,7 +1081,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1191,7 +1191,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1285,7 +1285,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1339,7 +1339,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1393,7 +1393,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1447,7 +1447,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1491,14 +1491,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1533,7 +1533,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-query-scheduler-memberlist-ring-read-path-disabled-generated.yaml b/operations/mimir-tests/test-query-scheduler-memberlist-ring-read-path-disabled-generated.yaml
index e68508c22c7..d658547b799 100644
--- a/operations/mimir-tests/test-query-scheduler-memberlist-ring-read-path-disabled-generated.yaml
+++ b/operations/mimir-tests/test-query-scheduler-memberlist-ring-read-path-disabled-generated.yaml
@@ -540,7 +540,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -638,7 +638,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -718,7 +718,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -796,7 +796,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -891,7 +891,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -969,7 +969,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1069,7 +1069,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1179,7 +1179,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1273,7 +1273,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1327,7 +1327,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1381,7 +1381,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1435,7 +1435,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1479,14 +1479,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1521,7 +1521,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-query-sharding-generated.yaml b/operations/mimir-tests/test-query-sharding-generated.yaml
index 31d7fe674bb..23da5b00a25 100644
--- a/operations/mimir-tests/test-query-sharding-generated.yaml
+++ b/operations/mimir-tests/test-query-sharding-generated.yaml
@@ -534,7 +534,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -631,7 +631,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -716,7 +716,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -788,7 +788,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -880,7 +880,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -957,7 +957,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1056,7 +1056,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1165,7 +1165,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1259,7 +1259,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1313,7 +1313,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1367,7 +1367,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1421,7 +1421,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1465,14 +1465,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1506,7 +1506,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-read-write-deployment-mode-s3-autoscaled-generated.yaml b/operations/mimir-tests/test-read-write-deployment-mode-s3-autoscaled-generated.yaml
index b848c4fd64d..ae0e09a6879 100644
--- a/operations/mimir-tests/test-read-write-deployment-mode-s3-autoscaled-generated.yaml
+++ b/operations/mimir-tests/test-read-write-deployment-mode-s3-autoscaled-generated.yaml
@@ -557,7 +557,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-read
ports:
@@ -684,7 +684,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -738,7 +738,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -792,7 +792,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -846,7 +846,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -906,14 +906,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -998,7 +998,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-backend
ports:
@@ -1098,14 +1098,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1190,7 +1190,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-backend
ports:
@@ -1290,14 +1290,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1382,7 +1382,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-backend
ports:
@@ -1520,7 +1520,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-write
ports:
@@ -1658,7 +1658,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-write
ports:
@@ -1796,7 +1796,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-write
ports:
diff --git a/operations/mimir-tests/test-read-write-deployment-mode-s3-caches-disabled-generated.yaml b/operations/mimir-tests/test-read-write-deployment-mode-s3-caches-disabled-generated.yaml
index 88d32c1748f..2be277142e4 100644
--- a/operations/mimir-tests/test-read-write-deployment-mode-s3-caches-disabled-generated.yaml
+++ b/operations/mimir-tests/test-read-write-deployment-mode-s3-caches-disabled-generated.yaml
@@ -420,7 +420,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-read
ports:
@@ -621,7 +621,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-backend
ports:
@@ -789,7 +789,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-backend
ports:
@@ -957,7 +957,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-backend
ports:
@@ -1095,7 +1095,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-write
ports:
@@ -1233,7 +1233,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-write
ports:
@@ -1371,7 +1371,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-write
ports:
diff --git a/operations/mimir-tests/test-read-write-deployment-mode-s3-generated.yaml b/operations/mimir-tests/test-read-write-deployment-mode-s3-generated.yaml
index 7598cc1a939..0b2c3807ef4 100644
--- a/operations/mimir-tests/test-read-write-deployment-mode-s3-generated.yaml
+++ b/operations/mimir-tests/test-read-write-deployment-mode-s3-generated.yaml
@@ -558,7 +558,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-read
ports:
@@ -685,7 +685,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -739,7 +739,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -793,7 +793,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -847,7 +847,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -907,14 +907,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -999,7 +999,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-backend
ports:
@@ -1099,14 +1099,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1191,7 +1191,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-backend
ports:
@@ -1291,14 +1291,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1383,7 +1383,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-backend
ports:
@@ -1521,7 +1521,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-write
ports:
@@ -1659,7 +1659,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-write
ports:
@@ -1797,7 +1797,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: mimir-write
ports:
diff --git a/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml b/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml
index 6c7e2b33c50..9e97610fd1a 100644
--- a/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml
+++ b/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml
@@ -651,7 +651,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -748,7 +748,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -828,7 +828,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -900,7 +900,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -994,7 +994,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1088,7 +1088,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-querier
ports:
@@ -1171,7 +1171,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-query-frontend
ports:
@@ -1243,7 +1243,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-query-scheduler
ports:
@@ -1311,7 +1311,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1410,7 +1410,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1519,7 +1519,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1613,7 +1613,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1667,7 +1667,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1721,7 +1721,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1775,7 +1775,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1819,14 +1819,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1860,7 +1860,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml b/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml
index 0a80ee428ea..ceb8766647d 100644
--- a/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml
+++ b/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml
@@ -651,7 +651,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -748,7 +748,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -828,7 +828,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -900,7 +900,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -992,7 +992,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -1086,7 +1086,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-querier
ports:
@@ -1169,7 +1169,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-query-frontend
ports:
@@ -1241,7 +1241,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler-query-scheduler
ports:
@@ -1309,7 +1309,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1408,7 +1408,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1517,7 +1517,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1611,7 +1611,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1665,7 +1665,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1719,7 +1719,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1773,7 +1773,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1817,14 +1817,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1858,7 +1858,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-shuffle-sharding-generated.yaml b/operations/mimir-tests/test-shuffle-sharding-generated.yaml
index df120e339af..b60509737f1 100644
--- a/operations/mimir-tests/test-shuffle-sharding-generated.yaml
+++ b/operations/mimir-tests/test-shuffle-sharding-generated.yaml
@@ -535,7 +535,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -634,7 +634,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -715,7 +715,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -788,7 +788,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -883,7 +883,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -960,7 +960,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1059,7 +1059,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1169,7 +1169,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1263,7 +1263,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1317,7 +1317,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1371,7 +1371,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1425,7 +1425,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1469,14 +1469,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1511,7 +1511,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml b/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml
index 599825a6701..25731f49e5a 100644
--- a/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml
+++ b/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml
@@ -535,7 +535,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -634,7 +634,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -715,7 +715,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -788,7 +788,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -884,7 +884,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -961,7 +961,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1060,7 +1060,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1170,7 +1170,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1264,7 +1264,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1318,7 +1318,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1372,7 +1372,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1426,7 +1426,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1470,14 +1470,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1512,7 +1512,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-storage-azure-generated.yaml b/operations/mimir-tests/test-storage-azure-generated.yaml
index 152b835e1c3..628c192b727 100644
--- a/operations/mimir-tests/test-storage-azure-generated.yaml
+++ b/operations/mimir-tests/test-storage-azure-generated.yaml
@@ -534,7 +534,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -633,7 +633,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -713,7 +713,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -785,7 +785,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -879,7 +879,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -958,7 +958,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1059,7 +1059,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1170,7 +1170,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1264,7 +1264,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1318,7 +1318,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1372,7 +1372,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1426,7 +1426,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1471,14 +1471,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1513,7 +1513,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-storage-gcs-generated.yaml b/operations/mimir-tests/test-storage-gcs-generated.yaml
index ca450b33c80..61d6c51a63d 100644
--- a/operations/mimir-tests/test-storage-gcs-generated.yaml
+++ b/operations/mimir-tests/test-storage-gcs-generated.yaml
@@ -534,7 +534,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -631,7 +631,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -711,7 +711,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -783,7 +783,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -875,7 +875,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -952,7 +952,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1051,7 +1051,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1160,7 +1160,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1254,7 +1254,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1308,7 +1308,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1362,7 +1362,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1416,7 +1416,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1460,14 +1460,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1501,7 +1501,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-storage-gcs-redis-generated.yaml b/operations/mimir-tests/test-storage-gcs-redis-generated.yaml
index 49768615c5e..95a60373135 100644
--- a/operations/mimir-tests/test-storage-gcs-redis-generated.yaml
+++ b/operations/mimir-tests/test-storage-gcs-redis-generated.yaml
@@ -406,7 +406,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -503,7 +503,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -580,7 +580,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -652,7 +652,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -744,7 +744,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -821,7 +821,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -920,7 +920,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1029,7 +1029,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1152,7 +1152,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-storage-s3-generated.yaml b/operations/mimir-tests/test-storage-s3-generated.yaml
index 221591690df..40d1746dd6d 100644
--- a/operations/mimir-tests/test-storage-s3-generated.yaml
+++ b/operations/mimir-tests/test-storage-s3-generated.yaml
@@ -534,7 +534,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -632,7 +632,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -712,7 +712,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -784,7 +784,7 @@ spec:
- -server.http-listen-port=8080
- -target=query-scheduler
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-scheduler
ports:
@@ -877,7 +877,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ruler
ports:
@@ -955,7 +955,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: alertmanager
ports:
@@ -1055,7 +1055,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -1165,7 +1165,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -1259,7 +1259,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1313,7 +1313,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1367,7 +1367,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1421,7 +1421,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1465,14 +1465,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1507,7 +1507,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir-tests/test-without-query-scheduler-generated.yaml b/operations/mimir-tests/test-without-query-scheduler-generated.yaml
index f3c6acaa7da..5902f52c4fb 100644
--- a/operations/mimir-tests/test-without-query-scheduler-generated.yaml
+++ b/operations/mimir-tests/test-without-query-scheduler-generated.yaml
@@ -437,7 +437,7 @@ spec:
value: "8"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: distributor
ports:
@@ -534,7 +534,7 @@ spec:
value: "5"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: querier
ports:
@@ -613,7 +613,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "5000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
@@ -700,7 +700,7 @@ spec:
- -server.http-listen-port=8080
- -target=compactor
- -usage-stats.installation-mode=jsonnet
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: compactor
ports:
@@ -809,7 +809,7 @@ spec:
env:
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: ingester
ports:
@@ -903,7 +903,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -957,7 +957,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1011,7 +1011,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1065,7 +1065,7 @@ spec:
- args:
- --memcached.address=localhost:11211
- --web.listen-address=0.0.0.0:9150
- image: prom/memcached-exporter:v0.14.2
+ image: prom/memcached-exporter:v0.14.3
imagePullPolicy: IfNotPresent
name: exporter
ports:
@@ -1109,14 +1109,14 @@ spec:
- -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576
- - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.index-cache.backend=memcached
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50
- -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100
- -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=150
- -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880
- - -blocks-storage.bucket-store.index-cache.memcached.timeout=450ms
+ - -blocks-storage.bucket-store.index-cache.memcached.timeout=750ms
- -blocks-storage.bucket-store.metadata-cache.backend=memcached
- -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local.:11211
- -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50
@@ -1150,7 +1150,7 @@ spec:
value: "12884901888"
- name: JAEGER_REPORTER_MAX_QUEUE_SIZE
value: "1000"
- image: grafana/mimir:2.11.0
+ image: grafana/mimir:2.12.0
imagePullPolicy: IfNotPresent
name: store-gateway
ports:
diff --git a/operations/mimir/autoscaling.libsonnet b/operations/mimir/autoscaling.libsonnet
index 22e2b7044bd..f55b3209edc 100644
--- a/operations/mimir/autoscaling.libsonnet
+++ b/operations/mimir/autoscaling.libsonnet
@@ -252,7 +252,9 @@
// The "up" metrics correctly handles the stale marker when the pod is terminated, while it’s not the
// case for the cAdvisor metrics. By intersecting these 2 metrics, we only look the CPU utilization
// of containers there are running at any given time, without suffering the PromQL lookback period.
-
+ //
+ // The second section of the query ensures that it only returns a result if all expected samples were
+ // present for the CPU metric over the last 15 minutes.
local cpuHPAQuery = |||
max_over_time(
sum(
@@ -261,6 +263,14 @@
max by (pod) (up{container="%(container)s",namespace="%(namespace)s"}) > 0
)[15m:]
) * 1000
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_cpu_usage_seconds_total{container="%(container)s",namespace="%(namespace)s"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
|||,
// To scale out relatively quickly, but scale in slower, we look at the max memory utilization across
@@ -268,8 +278,11 @@
// The "up" metrics correctly handles the stale marker when the pod is terminated, while it’s not the
// case for the cAdvisor metrics. By intersecting these 2 metrics, we only look the memory utilization
// of containers there are running at any given time, without suffering the PromQL lookback period.
- // If a pod is terminated because it OOMs, we still want to scale up -- add the memory resource request of OOMing
- // pods to the memory metric calculation.
+ //
+ // The second section of the query adds pods that were terminated due to an OOM to the memory calculation.
+ //
+ // The third section of the query ensures that it only returns a result if all expected samples were
+ // present for the memory metric over the last 15 minutes.
local memoryHPAQuery = |||
max_over_time(
sum(
@@ -289,6 +302,14 @@
max by (pod) (kube_pod_container_status_last_terminated_reason{container="%(container)s", namespace="%(namespace)s", reason="OOMKilled"})
or vector(0)
)
+ and
+ count (
+ count_over_time(
+ present_over_time(
+ container_memory_working_set_bytes{container="%(container)s",namespace="%(namespace)s"}[1m]
+ )[15m:1m]
+ ) >= 15
+ )
|||,
newResourceScaledObject(
@@ -321,6 +342,9 @@
// Threshold is expected to be a string
threshold: std.toString(std.floor(cpuToMilliCPUInt(cpu_requests) * cpu_target_utilization)),
+ // Disable ignoring null values. This allows HPAs to effectively pause when metrics are unavailable rather than scaling
+ // up or down unexpectedly. See https://keda.sh/docs/2.13/scalers/prometheus/ for more info.
+ ignore_null_values: false,
},
{
metric_name: '%s%s_memory_hpa_%s' %
@@ -333,6 +357,9 @@
// Threshold is expected to be a string
threshold: std.toString(std.floor($.util.siToBytes(memory_requests) * memory_target_utilization)),
+ // Disable ignoring null values. This allows HPAs to effectively pause when metrics are unavailable rather than scaling
+ // up or down unexpectedly. See https://keda.sh/docs/2.13/scalers/prometheus/ for more info.
+ ignore_null_values: false,
},
],
},
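
To make the new gating clause concrete, here is a minimal sketch (not part of this change set) of a small jsonnet helper that renders the same presence gate for an arbitrary metric, container, and namespace; the 'distributor' container and 'mimir' namespace used in the example are hypothetical and only illustrate the substitution that the templates above perform.

{
  // present_over_time() marks each 1-minute window in which the raw metric had at least one sample,
  // the [15m:1m] subquery evaluates that at 1-minute resolution over 15 minutes, count_over_time()
  // counts those windows per series, and the outer count(... >= 15) only yields a result when some
  // series was present in all 15 windows, i.e. the metric had no gaps during the lookback.
  local presenceGate(metric, container, namespace) = |||
    and
    count (
      count_over_time(
        present_over_time(
          %(metric)s{container="%(container)s",namespace="%(namespace)s"}[1m]
        )[15m:1m]
      ) >= 15
    )
  ||| % { metric: metric, container: container, namespace: namespace },

  // Hypothetical instantiation, for illustration only.
  example_cpu_gate:: presenceGate('container_cpu_usage_seconds_total', 'distributor', 'mimir'),
}

Combined with ignore_null_values: false on the KEDA trigger, an empty query result leaves the HPA holding its current replica count instead of scaling on incomplete data.
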
diff --git a/operations/mimir/compactor.libsonnet b/operations/mimir/compactor.libsonnet
index 02a71155bfe..a073377412d 100644
--- a/operations/mimir/compactor.libsonnet
+++ b/operations/mimir/compactor.libsonnet
@@ -102,14 +102,28 @@
$.util.readinessProbe +
$.jaeger_mixin,
- newCompactorStatefulSet(name, container, nodeAffinityMatchers=[])::
+ newCompactorStatefulSet(name, container, nodeAffinityMatchers=[], concurrent_rollout_enabled=false, max_unavailable=1)::
$.newMimirStatefulSet(name, 1, container, compactor_data_pvc) +
$.newMimirNodeAffinityMatchers(nodeAffinityMatchers) +
statefulSet.mixin.spec.template.spec.withTerminationGracePeriodSeconds(900) +
- $.mimirVolumeMounts,
+ $.mimirVolumeMounts +
+ (
+ if !concurrent_rollout_enabled then {} else
+ statefulSet.mixin.spec.selector.withMatchLabels({ name: 'compactor', 'rollout-group': 'compactor' }) +
+ statefulSet.mixin.spec.updateStrategy.withType('OnDelete') +
+ statefulSet.mixin.metadata.withLabelsMixin({ 'rollout-group': 'compactor' }) +
+ statefulSet.mixin.metadata.withAnnotationsMixin({ 'rollout-max-unavailable': std.toString(max_unavailable) }) +
+ statefulSet.mixin.spec.template.metadata.withLabelsMixin({ 'rollout-group': 'compactor' })
+ ),
compactor_statefulset: if !$._config.is_microservices_deployment_mode then null else
- $.newCompactorStatefulSet('compactor', $.compactor_container, $.compactor_node_affinity_matchers),
+ $.newCompactorStatefulSet(
+ 'compactor',
+ $.compactor_container,
+ $.compactor_node_affinity_matchers,
+ $._config.cortex_compactor_concurrent_rollout_enabled,
+ $._config.cortex_compactor_max_unavailable,
+ ),
compactor_service: if !$._config.is_microservices_deployment_mode then null else
local service = $.core.v1.service;
diff --git a/operations/mimir/config.libsonnet b/operations/mimir/config.libsonnet
index 749e93bf307..1bcdc9cb97c 100644
--- a/operations/mimir/config.libsonnet
+++ b/operations/mimir/config.libsonnet
@@ -92,6 +92,12 @@
// While this is the default value, we want to pass the same to the -blocks-storage.bucket-store.sync-interval
compactor_cleanup_interval: '15m',
+ // Enable concurrent rollout of the compactor through the use of the rollout operator.
+ // This feature modifies the compactor StatefulSet, which cannot be altered in place, so if the StatefulSet already exists it has to be deleted and re-applied for the feature to take effect.
+ cortex_compactor_concurrent_rollout_enabled: false,
+ // Maximum number of unavailable replicas during a compactor rollout when using cortex_compactor_concurrent_rollout_enabled feature.
+ cortex_compactor_max_unavailable: std.max($.compactor_statefulset.spec.replicas / 2, 1),
+
// Enable use of bucket index by querier, ruler and store-gateway.
bucket_index_enabled: true,
@@ -699,7 +705,7 @@
'blocks-storage.bucket-store.index-cache.memcached.addresses': 'dnssrvnoa+%(cache_index_queries_backend)s-index-queries.%(namespace)s.svc.%(cluster_domain)s:11211' % $._config,
'blocks-storage.bucket-store.index-cache.memcached.max-item-size': $._config.cache_index_queries_max_item_size_mb * 1024 * 1024,
'blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency': 50,
- 'blocks-storage.bucket-store.index-cache.memcached.timeout': '450ms',
+ 'blocks-storage.bucket-store.index-cache.memcached.timeout': '750ms',
} + if $._config.memcached_index_queries_mtls_enabled then {
'blocks-storage.bucket-store.index-cache.memcached.addresses': 'dnssrvnoa+%(cache_index_queries_backend)s-index-queries.%(namespace)s.svc.%(cluster_domain)s:11212' % $._config,
'blocks-storage.bucket-store.index-cache.memcached.connect-timeout': '1s',
@@ -724,7 +730,7 @@
'blocks-storage.bucket-store.chunks-cache.memcached.addresses': 'dnssrvnoa+%(cache_chunks_backend)s.%(namespace)s.svc.%(cluster_domain)s:11211' % $._config,
'blocks-storage.bucket-store.chunks-cache.memcached.max-item-size': $._config.cache_chunks_max_item_size_mb * 1024 * 1024,
'blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency': 50,
- 'blocks-storage.bucket-store.chunks-cache.memcached.timeout': '450ms',
+ 'blocks-storage.bucket-store.chunks-cache.memcached.timeout': '750ms',
} + if $._config.memcached_chunks_mtls_enabled then {
'blocks-storage.bucket-store.chunks-cache.memcached.addresses': 'dnssrvnoa+%(cache_chunks_backend)s.%(namespace)s.svc.%(cluster_domain)s:11212' % $._config,
'blocks-storage.bucket-store.chunks-cache.memcached.connect-timeout': '1s',
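
For operators who want to adopt the new compactor rollout flags, the following is a hypothetical overlay (not part of this diff) showing how an environment could opt in; the 'mimir/mimir.libsonnet' import path and the 'mimir' namespace are assumptions made for the sake of the example.

local mimir = import 'mimir/mimir.libsonnet';

mimir {
  _config+:: {
    namespace: 'mimir',  // assumed namespace, adjust per environment

    // Switches the compactor StatefulSet to an OnDelete update strategy and adds the rollout-group
    // label and rollout-max-unavailable annotation consumed by the rollout operator.
    cortex_compactor_concurrent_rollout_enabled: true,

    // Optional: override the default of half the compactor replicas (minimum 1).
    cortex_compactor_max_unavailable: 2,
  },
}
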
diff --git a/operations/mimir/images.libsonnet b/operations/mimir/images.libsonnet
index d173362965f..004dd00812d 100644
--- a/operations/mimir/images.libsonnet
+++ b/operations/mimir/images.libsonnet
@@ -2,10 +2,10 @@
_images+:: {
// Various third-party images.
memcached: 'memcached:1.6.22-alpine',
- memcachedExporter: 'prom/memcached-exporter:v0.14.2',
+ memcachedExporter: 'prom/memcached-exporter:v0.14.3',
// Our services.
- mimir: 'grafana/mimir:2.11.0',
+ mimir: 'grafana/mimir:2.12.0',
alertmanager: self.mimir,
distributor: self.mimir,
@@ -19,8 +19,8 @@
query_scheduler: self.mimir,
overrides_exporter: self.mimir,
- query_tee: 'grafana/query-tee:2.11.0',
- continuous_test: 'grafana/mimir-continuous-test:2.11.0',
+ query_tee: 'grafana/query-tee:2.12.0',
+ continuous_test: 'grafana/mimir-continuous-test:2.12.0',
// Read-write deployment mode.
mimir_write: self.mimir,
diff --git a/operations/mimir/multi-zone.libsonnet b/operations/mimir/multi-zone.libsonnet
index 6847289ea1e..bb6f16fd125 100644
--- a/operations/mimir/multi-zone.libsonnet
+++ b/operations/mimir/multi-zone.libsonnet
@@ -341,7 +341,7 @@
// Rollout operator.
//
- local rollout_operator_enabled = $._config.multi_zone_ingester_enabled || $._config.multi_zone_store_gateway_enabled,
+ local rollout_operator_enabled = $._config.multi_zone_ingester_enabled || $._config.multi_zone_store_gateway_enabled || $._config.cortex_compactor_concurrent_rollout_enabled,
rollout_operator_args:: {
'kubernetes.namespace': $._config.namespace,
diff --git a/pkg/alertmanager/multitenant_test.go b/pkg/alertmanager/multitenant_test.go
index 4036334c3b3..43129e4c771 100644
--- a/pkg/alertmanager/multitenant_test.go
+++ b/pkg/alertmanager/multitenant_test.go
@@ -132,7 +132,7 @@ func TestMultitenantAlertmanagerConfig_Validate(t *testing.T) {
expected error
}{
"should pass with default config": {
- setup: func(t *testing.T, cfg *MultitenantAlertmanagerConfig) {},
+ setup: func(*testing.T, *MultitenantAlertmanagerConfig) {},
expected: nil,
},
"should fail with empty external URL": {
@@ -142,13 +142,13 @@ func TestMultitenantAlertmanagerConfig_Validate(t *testing.T) {
expected: errEmptyExternalURL,
},
"should fail if persistent interval is 0": {
- setup: func(t *testing.T, cfg *MultitenantAlertmanagerConfig) {
+ setup: func(_ *testing.T, cfg *MultitenantAlertmanagerConfig) {
cfg.Persister.Interval = 0
},
expected: errInvalidPersistInterval,
},
"should fail if persistent interval is negative": {
- setup: func(t *testing.T, cfg *MultitenantAlertmanagerConfig) {
+ setup: func(_ *testing.T, cfg *MultitenantAlertmanagerConfig) {
cfg.Persister.Interval = -1
},
expected: errInvalidPersistInterval,
@@ -178,7 +178,7 @@ func TestMultitenantAlertmanagerConfig_Validate(t *testing.T) {
expected: errInvalidExternalURLMissingHostname,
},
"should fail if zone aware is enabled but zone is not set": {
- setup: func(t *testing.T, cfg *MultitenantAlertmanagerConfig) {
+ setup: func(_ *testing.T, cfg *MultitenantAlertmanagerConfig) {
cfg.ShardingRing.ZoneAwarenessEnabled = true
},
expected: errZoneAwarenessEnabledWithoutZoneInfo,
@@ -624,7 +624,7 @@ receivers:
serverInvoked := atomic.NewBool(false)
// Create a local HTTP server to test whether the request is received.
- server := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
+ server := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, _ *http.Request) {
serverInvoked.Store(true)
writer.WriteHeader(http.StatusOK)
}))
diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go
index 919ba65d2ae..27f5906c97b 100644
--- a/pkg/api/api_test.go
+++ b/pkg/api/api_test.go
@@ -189,7 +189,7 @@ func TestApiGzip(t *testing.T) {
})
}
- t.Run("compressed with gzip", func(t *testing.T) {
+ t.Run("compressed with gzip", func(*testing.T) {
})
}
diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go
index 4677d0ee0db..45a0b941d65 100644
--- a/pkg/api/handlers.go
+++ b/pkg/api/handlers.go
@@ -125,7 +125,7 @@ func indexHandler(httpPathPrefix string, content *IndexPageContent) http.Handler
})
template.Must(templ.Parse(indexPageHTML))
- return func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, _ *http.Request) {
err := templ.Execute(w, indexPageContents{LinkGroups: content.GetContent()})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -180,7 +180,7 @@ type configResponse struct {
}
func (cfg *Config) statusConfigHandler() http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, _ *http.Request) {
response := configResponse{
Status: "success",
Config: map[string]string{},
@@ -195,7 +195,7 @@ type flagsResponse struct {
}
func (cfg *Config) statusFlagsHandler() http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, _ *http.Request) {
response := flagsResponse{
Status: "success",
Flags: map[string]string{},
diff --git a/pkg/api/handlers_test.go b/pkg/api/handlers_test.go
index 8aafb96ac89..1fbe8555208 100644
--- a/pkg/api/handlers_test.go
+++ b/pkg/api/handlers_test.go
@@ -175,7 +175,7 @@ func TestConfigDiffHandler(t *testing.T) {
func TestConfigOverrideHandler(t *testing.T) {
cfg := &Config{
CustomConfigHandler: func(_ interface{}, _ interface{}) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, _ *http.Request) {
_, err := w.Write([]byte("config"))
assert.NoError(t, err)
}
diff --git a/pkg/api/tenant.go b/pkg/api/tenant.go
index 227c072241a..16f84c5de3c 100644
--- a/pkg/api/tenant.go
+++ b/pkg/api/tenant.go
@@ -29,14 +29,14 @@ func newTenantValidationMiddleware(federation bool, maxTenants int) middleware.I
return
}
- numIds := len(ids)
- if !federation && numIds > 1 {
- http.Error(w, fmt.Sprintf(tooManyTenantsTemplate, 1, numIds), http.StatusUnprocessableEntity)
+ numIDs := len(ids)
+ if !federation && numIDs > 1 {
+ http.Error(w, fmt.Sprintf(tooManyTenantsTemplate, 1, numIDs), http.StatusUnprocessableEntity)
return
}
- if federation && maxTenants > 0 && numIds > maxTenants {
- http.Error(w, fmt.Sprintf(tooManyTenantsTemplate, maxTenants, numIds), http.StatusUnprocessableEntity)
+ if federation && maxTenants > 0 && numIDs > maxTenants {
+ http.Error(w, fmt.Sprintf(tooManyTenantsTemplate, maxTenants, numIDs), http.StatusUnprocessableEntity)
return
}
diff --git a/pkg/api/tenant_test.go b/pkg/api/tenant_test.go
index 034b4661f22..431c37faec9 100644
--- a/pkg/api/tenant_test.go
+++ b/pkg/api/tenant_test.go
@@ -90,7 +90,7 @@ func TestNewTenantValidationMiddleware(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
- nop := http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {})
+ nop := http.HandlerFunc(func(http.ResponseWriter, *http.Request) {})
// Note that we add the authentication middleware since the tenant validation middleware relies
// on tenant ID being set in the context associated with the request.
handler := middleware.Merge(middleware.AuthenticateUser, newTenantValidationMiddleware(tc.federation, tc.maxTenants)).Wrap(nop)
diff --git a/pkg/compactor/block_upload_test.go b/pkg/compactor/block_upload_test.go
index 1b8071f6cfe..7bb124cb90b 100644
--- a/pkg/compactor/block_upload_test.go
+++ b/pkg/compactor/block_upload_test.go
@@ -1769,7 +1769,7 @@ func TestMultitenantCompactor_PeriodicValidationUpdater(t *testing.T) {
},
{
name: "updating validation file succeeds",
- assertions: func(t *testing.T, ctx context.Context, bkt objstore.Bucket) {
+ assertions: func(t *testing.T, _ context.Context, bkt objstore.Bucket) {
test.Poll(t, heartbeatInterval*2, true, func() interface{} {
return validationExists(t, bkt)
})
@@ -1787,7 +1787,7 @@ func TestMultitenantCompactor_PeriodicValidationUpdater(t *testing.T) {
{
name: "context cancelled before update",
cancelContext: true,
- assertions: func(t *testing.T, ctx context.Context, bkt objstore.Bucket) {
+ assertions: func(t *testing.T, _ context.Context, bkt objstore.Bucket) {
require.False(t, validationExists(t, bkt))
},
},
diff --git a/pkg/compactor/blocks_cleaner.go b/pkg/compactor/blocks_cleaner.go
index 1439182f934..e46dfee5102 100644
--- a/pkg/compactor/blocks_cleaner.go
+++ b/pkg/compactor/blocks_cleaner.go
@@ -683,18 +683,18 @@ func stalePartialBlockLastModifiedTime(ctx context.Context, blockID ulid.ULID, u
}
func estimateCompactionJobsFromBucketIndex(ctx context.Context, userID string, userBucket objstore.InstrumentedBucket, idx *bucketindex.Index, compactionBlockRanges mimir_tsdb.DurationList, mergeShards int, splitGroups int) ([]*Job, error) {
- metas := convertBucketIndexToMetasForCompactionJobPlanning(idx)
+ metas := ConvertBucketIndexToMetasForCompactionJobPlanning(idx)
// We need to pass this metric to MetadataFilters, but we don't need to report this value from BlocksCleaner.
synced := newNoopGaugeVec()
for _, f := range []block.MetadataFilter{
+ NewLabelRemoverFilter(compactionIgnoredLabels),
// We don't include ShardAwareDeduplicateFilter, because it relies on list of compaction sources, which are not present in the BucketIndex.
// We do include NoCompactionMarkFilter to avoid computing jobs from blocks that are marked for no-compaction.
NewNoCompactionMarkFilter(userBucket),
} {
- err := f.Filter(ctx, metas, synced)
- if err != nil {
+ if err := f.Filter(ctx, metas, synced); err != nil {
return nil, err
}
}
@@ -705,23 +705,31 @@ func estimateCompactionJobsFromBucketIndex(ctx context.Context, userID string, u
}
// Convert index into map of block Metas, but ignore blocks marked for deletion.
-func convertBucketIndexToMetasForCompactionJobPlanning(idx *bucketindex.Index) map[ulid.ULID]*block.Meta {
+func ConvertBucketIndexToMetasForCompactionJobPlanning(idx *bucketindex.Index) map[ulid.ULID]*block.Meta {
deletedULIDs := idx.BlockDeletionMarks.GetULIDs()
- deleted := make(map[ulid.ULID]bool, len(deletedULIDs))
+ deleted := make(map[ulid.ULID]struct{}, len(deletedULIDs))
for _, id := range deletedULIDs {
- deleted[id] = true
+ deleted[id] = struct{}{}
}
metas := map[ulid.ULID]*block.Meta{}
for _, b := range idx.Blocks {
- if deleted[b.ID] {
+ if _, del := deleted[b.ID]; del {
continue
}
metas[b.ID] = b.ThanosMeta()
if metas[b.ID].Thanos.Labels == nil {
metas[b.ID].Thanos.Labels = map[string]string{}
}
- metas[b.ID].Thanos.Labels[mimir_tsdb.CompactorShardIDExternalLabel] = b.CompactorShardID // Needed for correct planning.
+
+ // Correct planning depends on external labels being present. We didn't
+ // always persist labels into the bucket index, but we may have tracked
+ // the shard ID label, so copy that back over if it isn't there.
+ if b.CompactorShardID != "" {
+ if _, found := metas[b.ID].Thanos.Labels[mimir_tsdb.CompactorShardIDExternalLabel]; !found {
+ metas[b.ID].Thanos.Labels[mimir_tsdb.CompactorShardIDExternalLabel] = b.CompactorShardID
+ }
+ }
}
return metas
}
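
Note on the shard-ID back-fill in `ConvertBucketIndexToMetasForCompactionJobPlanning` above — a minimal, self-contained sketch of the same rule using plain maps instead of Mimir's `bucketindex`/`block` types; the label name constant is an assumption made for illustration only.

```go
// Illustration only: simplified shard-ID back-fill mirroring the logic above.
package main

import "fmt"

type indexBlock struct {
	Labels           map[string]string
	CompactorShardID string
}

// Assumed label name, a stand-in for mimir_tsdb.CompactorShardIDExternalLabel.
const shardIDLabel = "__compactor_shard_id__"

func planningLabels(b indexBlock) map[string]string {
	labels := map[string]string{}
	for k, v := range b.Labels {
		labels[k] = v
	}
	// Older bucket indexes may carry the shard ID only as a separate field,
	// so copy it into the labels unless a label value is already present.
	if b.CompactorShardID != "" {
		if _, found := labels[shardIDLabel]; !found {
			labels[shardIDLabel] = b.CompactorShardID
		}
	}
	return labels
}

func main() {
	// Adopt the shard ID when no label is set.
	fmt.Println(planningLabels(indexBlock{CompactorShardID: "78"}))
	// Keep an existing label; don't overwrite it with the field.
	fmt.Println(planningLabels(indexBlock{CompactorShardID: "78", Labels: map[string]string{shardIDLabel: "3"}}))
}
```

The "adopt shard ID" and "don't overwrite labeled shard ID" test cases added below exercise exactly these two branches.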
diff --git a/pkg/compactor/blocks_cleaner_test.go b/pkg/compactor/blocks_cleaner_test.go
index 2e883184f5c..b625fa05949 100644
--- a/pkg/compactor/blocks_cleaner_test.go
+++ b/pkg/compactor/blocks_cleaner_test.go
@@ -24,6 +24,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/testutil"
+ prom_tsdb "github.com/prometheus/prometheus/tsdb"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/thanos-io/objstore"
@@ -1044,41 +1045,198 @@ func TestComputeCompactionJobs(t *testing.T) {
}
const user = "test"
-
twoHoursMS := 2 * time.Hour.Milliseconds()
dayMS := 24 * time.Hour.Milliseconds()
+ userBucket := bucket.NewUserBucketClient(user, bucketClient, nil)
+
+ // Mark block for no-compaction.
blockMarkedForNoCompact := ulid.MustNew(ulid.Now(), rand.Reader)
+ require.NoError(t, block.MarkForNoCompact(context.Background(), log.NewNopLogger(), userBucket, blockMarkedForNoCompact, block.CriticalNoCompactReason, "testing", promauto.With(nil).NewCounter(prometheus.CounterOpts{})))
- index := bucketindex.Index{}
- index.Blocks = bucketindex.Blocks{
- // Some 2h blocks that should be compacted together and split.
- &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: 0, MaxTime: twoHoursMS},
- &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: 0, MaxTime: twoHoursMS},
- &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: 0, MaxTime: twoHoursMS},
+ cases := map[string]struct {
+ blocks bucketindex.Blocks
+ expectedSplits int
+ expectedMerges int
+ }{
+ "standard": {
+ blocks: bucketindex.Blocks{
+ // Some 2h blocks that should be compacted together and split.
+ &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: 0, MaxTime: twoHoursMS},
+ &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: 0, MaxTime: twoHoursMS},
+ &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: 0, MaxTime: twoHoursMS},
+
+ // Some merge jobs.
+ &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: dayMS, MaxTime: 2 * dayMS, CompactorShardID: "1_of_3"},
+ &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: dayMS, MaxTime: 2 * dayMS, CompactorShardID: "1_of_3"},
+
+ &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: dayMS, MaxTime: 2 * dayMS, CompactorShardID: "2_of_3"},
+ &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: dayMS, MaxTime: 2 * dayMS, CompactorShardID: "2_of_3"},
+
+ // This merge job is skipped, as the block is marked for no-compaction.
+ &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: dayMS, MaxTime: 2 * dayMS, CompactorShardID: "3_of_3"},
+ &bucketindex.Block{ID: blockMarkedForNoCompact, MinTime: dayMS, MaxTime: 2 * dayMS, CompactorShardID: "3_of_3"},
+ },
+ expectedSplits: 1,
+ expectedMerges: 2,
+ },
+ "labels don't match": {
+ blocks: bucketindex.Blocks{
+ // Compactor wouldn't produce a job for this pair as their external labels differ:
+ &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: 5 * dayMS, MaxTime: 6 * dayMS,
+ Labels: map[string]string{
+ tsdb.OutOfOrderExternalLabel: tsdb.OutOfOrderExternalLabelValue,
+ },
+ },
+ &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: 5 * dayMS, MaxTime: 6 * dayMS,
+ Labels: map[string]string{
+ "another_label": "-1",
+ },
+ },
+ },
+ expectedSplits: 0,
+ expectedMerges: 0,
+ },
+ "ignore deprecated labels": {
+ blocks: bucketindex.Blocks{
+ // Compactor will ignore deprecated labels when computing jobs. Estimation should do the same.
+ &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: 5 * dayMS, MaxTime: 6 * dayMS,
+ Labels: map[string]string{
+ "honored_label": "12345",
+ tsdb.DeprecatedTenantIDExternalLabel: "tenant1",
+ tsdb.DeprecatedIngesterIDExternalLabel: "ingester1",
+ },
+ },
+ &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: 5 * dayMS, MaxTime: 6 * dayMS,
+ Labels: map[string]string{
+ "honored_label": "12345",
+ tsdb.DeprecatedTenantIDExternalLabel: "tenant2",
+ tsdb.DeprecatedIngesterIDExternalLabel: "ingester2",
+ },
+ },
+ },
+ expectedSplits: 0,
+ expectedMerges: 1,
+ },
+ }
- // Some merge jobs.
- &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: dayMS, MaxTime: 2 * dayMS, CompactorShardID: "1_of_3"},
- &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: dayMS, MaxTime: 2 * dayMS, CompactorShardID: "1_of_3"},
+ for name, c := range cases {
+ t.Run(name, func(t *testing.T) {
+ index := &bucketindex.Index{Blocks: c.blocks}
+ jobs, err := estimateCompactionJobsFromBucketIndex(context.Background(), user, userBucket, index, cfg.CompactionBlockRanges, 3, 0)
+ require.NoError(t, err)
+ split, merge := computeSplitAndMergeJobs(jobs)
+ require.Equal(t, c.expectedSplits, split)
+ require.Equal(t, c.expectedMerges, merge)
+ })
+ }
+}
- &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: dayMS, MaxTime: 2 * dayMS, CompactorShardID: "2_of_3"},
- &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: dayMS, MaxTime: 2 * dayMS, CompactorShardID: "2_of_3"},
+func TestConvertBucketIndexToMetasForCompactionJobPlanning(t *testing.T) {
+ twoHoursMS := 2 * time.Hour.Milliseconds()
- // This merge job is skipped, as block is marked for no-compaction.
- &bucketindex.Block{ID: ulid.MustNew(ulid.Now(), rand.Reader), MinTime: dayMS, MaxTime: 2 * dayMS, CompactorShardID: "3_of_3"},
- &bucketindex.Block{ID: blockMarkedForNoCompact, MinTime: dayMS, MaxTime: 2 * dayMS, CompactorShardID: "3_of_3"},
+ makeUlid := func(n byte) ulid.ULID {
+ return ulid.ULID{n}
}
- userBucket := bucket.NewUserBucketClient(user, bucketClient, nil)
- // Mark block for no-compaction.
- require.NoError(t, block.MarkForNoCompact(context.Background(), log.NewNopLogger(), userBucket, blockMarkedForNoCompact, block.CriticalNoCompactReason, "testing", promauto.With(nil).NewCounter(prometheus.CounterOpts{})))
+ makeMeta := func(id ulid.ULID, labels map[string]string) *block.Meta {
+ return &block.Meta{
+ BlockMeta: prom_tsdb.BlockMeta{
+ ULID: id,
+ MinTime: 0,
+ MaxTime: twoHoursMS,
+ Version: block.TSDBVersion1,
+ },
+ Thanos: block.ThanosMeta{
+ Version: block.ThanosVersion1,
+ Labels: labels,
+ },
+ }
+ }
- // No grouping of jobs for split-compaction. All jobs will be in single split compaction.
- jobs, err := estimateCompactionJobsFromBucketIndex(context.Background(), user, userBucket, &index, cfg.CompactionBlockRanges, 3, 0)
- require.NoError(t, err)
- split, merge := computeSplitAndMergeJobs(jobs)
- require.Equal(t, 1, split)
- require.Equal(t, 2, merge)
+ cases := map[string]struct {
+ index *bucketindex.Index
+ expectedMetas map[ulid.ULID]*block.Meta
+ }{
+ "empty": {
+ index: &bucketindex.Index{Blocks: bucketindex.Blocks{}},
+ expectedMetas: map[ulid.ULID]*block.Meta{},
+ },
+ "basic": {
+ index: &bucketindex.Index{
+ Blocks: bucketindex.Blocks{
+ &bucketindex.Block{ID: makeUlid(1), MinTime: 0, MaxTime: twoHoursMS},
+ },
+ },
+ expectedMetas: map[ulid.ULID]*block.Meta{
+ makeUlid(1): makeMeta(makeUlid(1), map[string]string{}),
+ },
+ },
+ "adopt shard ID": {
+ index: &bucketindex.Index{
+ Blocks: bucketindex.Blocks{
+ &bucketindex.Block{ID: makeUlid(1), MinTime: 0, MaxTime: twoHoursMS, CompactorShardID: "78"},
+ },
+ },
+ expectedMetas: map[ulid.ULID]*block.Meta{
+ makeUlid(1): makeMeta(makeUlid(1), map[string]string{tsdb.CompactorShardIDExternalLabel: "78"}),
+ },
+ },
+ "use labeled shard ID": {
+ index: &bucketindex.Index{
+ Blocks: bucketindex.Blocks{
+ &bucketindex.Block{ID: makeUlid(1), MinTime: 0, MaxTime: twoHoursMS,
+ Labels: map[string]string{tsdb.CompactorShardIDExternalLabel: "3"}},
+ },
+ },
+ expectedMetas: map[ulid.ULID]*block.Meta{
+ makeUlid(1): makeMeta(makeUlid(1), map[string]string{tsdb.CompactorShardIDExternalLabel: "3"}),
+ },
+ },
+ "don't overwrite labeled shard ID": {
+ index: &bucketindex.Index{
+ Blocks: bucketindex.Blocks{
+ &bucketindex.Block{ID: makeUlid(1), MinTime: 0, MaxTime: twoHoursMS, CompactorShardID: "78",
+ Labels: map[string]string{tsdb.CompactorShardIDExternalLabel: "3"}},
+ },
+ },
+ expectedMetas: map[ulid.ULID]*block.Meta{
+ makeUlid(1): makeMeta(makeUlid(1), map[string]string{tsdb.CompactorShardIDExternalLabel: "3"}),
+ },
+ },
+ "honor deletion marks": {
+ index: &bucketindex.Index{
+ BlockDeletionMarks: bucketindex.BlockDeletionMarks{
+ &bucketindex.BlockDeletionMark{ID: makeUlid(14)},
+ },
+ Blocks: bucketindex.Blocks{
+ &bucketindex.Block{ID: makeUlid(14), MinTime: 0, MaxTime: twoHoursMS},
+ },
+ },
+ expectedMetas: map[ulid.ULID]*block.Meta{},
+ },
+ "excess deletes": {
+ index: &bucketindex.Index{
+ BlockDeletionMarks: bucketindex.BlockDeletionMarks{
+ &bucketindex.BlockDeletionMark{ID: makeUlid(15)},
+ &bucketindex.BlockDeletionMark{ID: makeUlid(16)},
+ },
+ Blocks: bucketindex.Blocks{
+ &bucketindex.Block{ID: makeUlid(14), MinTime: 0, MaxTime: twoHoursMS},
+ },
+ },
+ expectedMetas: map[ulid.ULID]*block.Meta{
+ makeUlid(14): makeMeta(makeUlid(14), map[string]string{}),
+ },
+ },
+ }
+
+ for name, c := range cases {
+ t.Run(name, func(t *testing.T) {
+ m := ConvertBucketIndexToMetasForCompactionJobPlanning(c.index)
+ require.Equal(t, c.expectedMetas, m)
+ })
+ }
}
type mockBucketFailure struct {
diff --git a/pkg/compactor/bucket_compactor.go b/pkg/compactor/bucket_compactor.go
index a31b9fe6e67..8c245d4b2c8 100644
--- a/pkg/compactor/bucket_compactor.go
+++ b/pkg/compactor/bucket_compactor.go
@@ -11,6 +11,7 @@ import (
"os"
"path"
"path/filepath"
+ "strconv"
"strings"
"sync"
"time"
@@ -433,6 +434,13 @@ func (c *BucketCompactor) runCompactionJob(ctx context.Context, job *Job) (shoul
// into the next planning cycle.
// Eventually the block we just uploaded should get synced into the job again (including sync-delay).
for _, meta := range toCompact {
+ attrs, err := block.GetMetaAttributes(ctx, meta, c.bkt)
+ if err != nil {
+ level.Warn(jobLogger).Log("msg", "failed to determine block upload time", "block", meta.ULID.String(), "err", err)
+ } else {
+ c.metrics.blockCompactionDelay.WithLabelValues(strconv.Itoa(meta.Compaction.Level)).Observe(compactionBegin.Sub(attrs.LastModified).Seconds())
+ }
+
if err := deleteBlock(c.bkt, meta.ULID, filepath.Join(subDir, meta.ULID.String()), jobLogger, c.metrics.blocksMarkedForDeletion); err != nil {
return false, nil, errors.Wrapf(err, "mark old block for deletion from bucket")
}
@@ -645,6 +653,7 @@ type BucketCompactorMetrics struct {
groupCompactionRunsCompleted prometheus.Counter
groupCompactionRunsFailed prometheus.Counter
groupCompactions prometheus.Counter
+ blockCompactionDelay *prometheus.HistogramVec
compactionBlocksVerificationFailed prometheus.Counter
blocksMarkedForDeletion prometheus.Counter
blocksMarkedForNoCompact *prometheus.CounterVec
@@ -670,6 +679,14 @@ func NewBucketCompactorMetrics(blocksMarkedForDeletion prometheus.Counter, reg p
Name: "cortex_compactor_group_compactions_total",
Help: "Total number of group compaction attempts that resulted in new block(s).",
}),
+ blockCompactionDelay: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
+ Name: "cortex_compactor_block_compaction_delay_seconds",
+ Help: "Delay between a block being uploaded and successfully compacting it.",
+ Buckets: []float64{60.0, 300.0, 600.0, 1800.0, 3600.0, 7200.0, 10800.0, 14400.0, 18000.0, 36000.0, 72000.0},
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ }, []string{"level"}),
compactionBlocksVerificationFailed: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "cortex_compactor_blocks_verification_failures_total",
Help: "Total number of failures when verifying min/max time ranges of compacted blocks.",
@@ -694,7 +711,7 @@ func NewBucketCompactorMetrics(blocksMarkedForDeletion prometheus.Counter, reg p
type ownCompactionJobFunc func(job *Job) (bool, error)
// ownAllJobs is a ownCompactionJobFunc that always return true.
-var ownAllJobs = func(job *Job) (bool, error) {
+var ownAllJobs = func(*Job) (bool, error) {
return true, nil
}
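
Note on the new `cortex_compactor_block_compaction_delay_seconds` metric above — a short sketch of recording a delay into a histogram configured with both classic buckets and native-histogram settings via `prometheus/client_golang`; the metric name, label value, and upload time are made up for illustration.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	delay := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:                            "example_block_compaction_delay_seconds",
		Help:                            "Delay between a block being uploaded and it being compacted (example).",
		Buckets:                         []float64{60, 300, 600, 1800, 3600},
		NativeHistogramBucketFactor:     1.1,
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: time.Hour,
	}, []string{"level"})
	reg.MustRegister(delay)

	// Pretend the block's meta.json was uploaded 42 minutes before compaction began.
	uploaded := time.Now().Add(-42 * time.Minute)
	delay.WithLabelValues("2").Observe(time.Since(uploaded).Seconds())
}
```

Configuring both forms means scrapers that don't understand native histograms can still read the classic buckets, while protobuf-capable scrapers get the higher-resolution native histogram.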
diff --git a/pkg/compactor/bucket_compactor_test.go b/pkg/compactor/bucket_compactor_test.go
index 67e550d0642..0f999a694bd 100644
--- a/pkg/compactor/bucket_compactor_test.go
+++ b/pkg/compactor/bucket_compactor_test.go
@@ -94,13 +94,13 @@ func TestBucketCompactor_FilterOwnJobs(t *testing.T) {
expectedJobs int
}{
"should return all planned jobs if the compactor instance owns all of them": {
- ownJob: func(job *Job) (bool, error) {
+ ownJob: func(*Job) (bool, error) {
return true, nil
},
expectedJobs: 4,
},
"should return no jobs if the compactor instance owns none of them": {
- ownJob: func(job *Job) (bool, error) {
+ ownJob: func(*Job) (bool, error) {
return false, nil
},
expectedJobs: 0,
@@ -108,7 +108,7 @@ func TestBucketCompactor_FilterOwnJobs(t *testing.T) {
"should return some jobs if the compactor instance owns some of them": {
ownJob: func() ownCompactionJobFunc {
count := 0
- return func(job *Job) (bool, error) {
+ return func(*Job) (bool, error) {
count++
return count%2 == 0, nil
}
diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go
index 5fead72d4a5..76b136baa9f 100644
--- a/pkg/compactor/compactor.go
+++ b/pkg/compactor/compactor.go
@@ -60,6 +60,14 @@ var (
errInvalidSymbolFlushersConcurrency = fmt.Errorf("invalid symbols-flushers-concurrency value, must be positive")
errInvalidMaxBlockUploadValidationConcurrency = fmt.Errorf("invalid max-block-upload-validation-concurrency value, can't be negative")
RingOp = ring.NewOp([]ring.InstanceState{ring.ACTIVE}, nil)
+
+ // compactionIgnoredLabels defines the external labels that the compactor will
+ // drop/ignore when planning jobs so that they don't keep blocks from
+ // compacting together.
+ compactionIgnoredLabels = []string{
+ mimir_tsdb.DeprecatedIngesterIDExternalLabel,
+ mimir_tsdb.DeprecatedTenantIDExternalLabel,
+ }
)
// BlocksGrouperFactory builds and returns the grouper to use to compact a tenant's blocks.
@@ -733,14 +741,7 @@ func (c *MultitenantCompactor) compactUser(ctx context.Context, userID string) e
// List of filters to apply (order matters).
fetcherFilters := []block.MetadataFilter{
- // Remove the ingester ID because we don't shard blocks anymore, while still
- // honoring the shard ID if sharding was done in the past.
- // Remove TenantID external label to make sure that we compact blocks with and without the label
- // together.
- NewLabelRemoverFilter([]string{
- mimir_tsdb.DeprecatedTenantIDExternalLabel,
- mimir_tsdb.DeprecatedIngesterIDExternalLabel,
- }),
+ NewLabelRemoverFilter(compactionIgnoredLabels),
deduplicateBlocksFilter,
// removes blocks that should not be compacted due to being marked so.
NewNoCompactionMarkFilter(userBucket),
diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go
index b1984487c56..208446650b8 100644
--- a/pkg/compactor/compactor_test.go
+++ b/pkg/compactor/compactor_test.go
@@ -95,7 +95,7 @@ func TestConfig_Validate(t *testing.T) {
expected string
}{
"should pass with the default config": {
- setup: func(cfg *Config) {},
+ setup: func(*Config) {},
expected: "",
},
"should pass with only 1 block range period": {
@@ -1352,7 +1352,7 @@ func TestMultitenantCompactor_ShouldSkipCompactionForJobsNoMoreOwnedAfterPlannin
c, _, tsdbPlanner, logs, registry := prepareWithConfigProvider(t, cfg, bucketClient, limits)
// Mock the planner as if there's no compaction to do, in order to simplify tests.
- tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*block.Meta{}, nil).Run(func(args mock.Arguments) {
+ tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*block.Meta{}, nil).Run(func(mock.Arguments) {
// As soon as the first Plan() is called by the compactor, we do switch
// the instance to LEAVING state. This way, after this call, we expect the compactor
// to skip next compaction job because not owned anymore by this instance.
@@ -1783,11 +1783,11 @@ func prepareWithConfigProvider(t *testing.T, compactorCfg Config, bucketClient o
logger := &componentLogger{component: "compactor", log: log.NewLogfmtLogger(logs)}
registry := prometheus.NewRegistry()
- bucketClientFactory := func(ctx context.Context) (objstore.Bucket, error) {
+ bucketClientFactory := func(context.Context) (objstore.Bucket, error) {
return bucketClient, nil
}
- blocksCompactorFactory := func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (Compactor, Planner, error) {
+ blocksCompactorFactory := func(context.Context, Config, log.Logger, prometheus.Registerer) (Compactor, Planner, error) {
return tsdbCompactor, tsdbPlanner, nil
}
diff --git a/pkg/compactor/job.go b/pkg/compactor/job.go
index ffae8878b48..b4794a72f6c 100644
--- a/pkg/compactor/job.go
+++ b/pkg/compactor/job.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
"math"
- "path"
"sort"
"time"
@@ -174,11 +173,9 @@ func jobWaitPeriodElapsed(ctx context.Context, job *Job, waitPeriod time.Duratio
continue
}
- metaPath := path.Join(meta.ULID.String(), block.MetaFilename)
-
- attrs, err := userBucket.Attributes(ctx, metaPath)
+ attrs, err := block.GetMetaAttributes(ctx, meta, userBucket)
if err != nil {
- return false, meta, errors.Wrapf(err, "unable to get object attributes for %s", metaPath)
+ return false, meta, err
}
if attrs.LastModified.After(threshold) {
diff --git a/pkg/continuoustest/config.go b/pkg/continuoustest/config.go
new file mode 100644
index 00000000000..8232cc99695
--- /dev/null
+++ b/pkg/continuoustest/config.go
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+
+package continuoustest
+
+import (
+ "flag"
+)
+
+type Config struct {
+ Client ClientConfig `yaml:"-"`
+ Manager ManagerConfig `yaml:"-"`
+ WriteReadSeriesTest WriteReadSeriesTestConfig `yaml:"-"`
+}
+
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+ cfg.Client.RegisterFlags(f)
+ cfg.Manager.RegisterFlags(f)
+ cfg.WriteReadSeriesTest.RegisterFlags(f)
+}
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 175ab6298c9..a15839e3f2a 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -706,12 +706,8 @@ func (d *Distributor) wrapPushWithMiddlewares(next PushFunc) PushFunc {
func (d *Distributor) prePushHaDedupeMiddleware(next PushFunc) PushFunc {
return func(ctx context.Context, pushReq *Request) error {
- cleanupInDefer := true
- defer func() {
- if cleanupInDefer {
- pushReq.CleanUp()
- }
- }()
+ next, maybeCleanup := nextOrCleanup(next, pushReq)
+ defer maybeCleanup()
req, err := pushReq.WriteRequest()
if err != nil {
@@ -724,7 +720,6 @@ func (d *Distributor) prePushHaDedupeMiddleware(next PushFunc) PushFunc {
}
if len(req.Timeseries) == 0 || !d.limits.AcceptHASamples(userID) {
- cleanupInDefer = false
return next(ctx, pushReq)
}
@@ -771,19 +766,14 @@ func (d *Distributor) prePushHaDedupeMiddleware(next PushFunc) PushFunc {
d.nonHASamples.WithLabelValues(userID).Add(float64(numSamples))
}
- cleanupInDefer = false
return next(ctx, pushReq)
}
}
func (d *Distributor) prePushRelabelMiddleware(next PushFunc) PushFunc {
return func(ctx context.Context, pushReq *Request) error {
- cleanupInDefer := true
- defer func() {
- if cleanupInDefer {
- pushReq.CleanUp()
- }
- }()
+ next, maybeCleanup := nextOrCleanup(next, pushReq)
+ defer maybeCleanup()
userID, err := tenant.TenantID(ctx)
if err != nil {
@@ -791,7 +781,6 @@ func (d *Distributor) prePushRelabelMiddleware(next PushFunc) PushFunc {
}
if !d.limits.MetricRelabelingEnabled(userID) {
- cleanupInDefer = false
return next(ctx, pushReq)
}
@@ -834,7 +823,6 @@ func (d *Distributor) prePushRelabelMiddleware(next PushFunc) PushFunc {
req.Timeseries = util.RemoveSliceIndexes(req.Timeseries, removeTsIndexes)
}
- cleanupInDefer = false
return next(ctx, pushReq)
}
}
@@ -843,12 +831,8 @@ func (d *Distributor) prePushRelabelMiddleware(next PushFunc) PushFunc {
// filtering empty values. This is a protection mechanism for ingesters.
func (d *Distributor) prePushSortAndFilterMiddleware(next PushFunc) PushFunc {
return func(ctx context.Context, pushReq *Request) error {
- cleanupInDefer := true
- defer func() {
- if cleanupInDefer {
- pushReq.CleanUp()
- }
- }()
+ next, maybeCleanup := nextOrCleanup(next, pushReq)
+ defer maybeCleanup()
req, err := pushReq.WriteRequest()
if err != nil {
@@ -884,19 +868,14 @@ func (d *Distributor) prePushSortAndFilterMiddleware(next PushFunc) PushFunc {
req.Timeseries = util.RemoveSliceIndexes(req.Timeseries, removeTsIndexes)
}
- cleanupInDefer = false
return next(ctx, pushReq)
}
}
func (d *Distributor) prePushValidationMiddleware(next PushFunc) PushFunc {
return func(ctx context.Context, pushReq *Request) error {
- cleanupInDefer := true
- defer func() {
- if cleanupInDefer {
- pushReq.CleanUp()
- }
- }()
+ next, maybeCleanup := nextOrCleanup(next, pushReq)
+ defer maybeCleanup()
req, err := pushReq.WriteRequest()
if err != nil {
@@ -1020,7 +999,6 @@ func (d *Distributor) prePushValidationMiddleware(next PushFunc) PushFunc {
// totalN included samples, exemplars and metadata. Ingester follows this pattern when computing its ingestion rate.
d.ingestionRate.Add(int64(totalN))
- cleanupInDefer = false
err = next(ctx, pushReq)
if err != nil {
// Errors resulting from the pushing to the ingesters have priority over validation errors.
@@ -1035,12 +1013,8 @@ func (d *Distributor) prePushValidationMiddleware(next PushFunc) PushFunc {
// including data that later gets modified or dropped.
func (d *Distributor) metricsMiddleware(next PushFunc) PushFunc {
return func(ctx context.Context, pushReq *Request) error {
- cleanupInDefer := true
- defer func() {
- if cleanupInDefer {
- pushReq.CleanUp()
- }
- }()
+ next, maybeCleanup := nextOrCleanup(next, pushReq)
+ defer maybeCleanup()
req, err := pushReq.WriteRequest()
if err != nil {
@@ -1071,7 +1045,6 @@ func (d *Distributor) metricsMiddleware(next PushFunc) PushFunc {
d.incomingExemplars.WithLabelValues(userID).Add(float64(numExemplars))
d.incomingMetadata.WithLabelValues(userID).Add(float64(len(req.Metadata)))
- cleanupInDefer = false
return next(ctx, pushReq)
}
}
@@ -1228,12 +1201,8 @@ func (d *Distributor) limitsMiddleware(next PushFunc) PushFunc {
d.cleanupAfterPushFinished(rs)
})
- cleanupInDefer := true
- defer func() {
- if cleanupInDefer {
- pushReq.CleanUp()
- }
- }()
+ next, maybeCleanup := nextOrCleanup(next, pushReq)
+ defer maybeCleanup()
userID, err := tenant.TenantID(ctx)
if err != nil {
@@ -1259,11 +1228,25 @@ func (d *Distributor) limitsMiddleware(next PushFunc) PushFunc {
return err
}
- cleanupInDefer = false
return next(ctx, pushReq)
}
}
+// nextOrCleanup returns a new PushFunc and a cleanup function that should be deferred by the caller.
+// The cleanup function calls Request.CleanUp() only if next() hasn't been called yet.
+func nextOrCleanup(next PushFunc, pushReq *Request) (_ PushFunc, maybeCleanup func()) {
+ cleanupInDefer := true
+ return func(ctx context.Context, req *Request) error {
+ cleanupInDefer = false
+ return next(ctx, req)
+ },
+ func() {
+ if cleanupInDefer {
+ pushReq.CleanUp()
+ }
+ }
+}
+
// Push is gRPC method registered as client.IngesterServer and distributor.DistributorServer.
func (d *Distributor) Push(ctx context.Context, req *mimirpb.WriteRequest) (*mimirpb.WriteResponse, error) {
pushReq := NewParsedRequest(req)
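
Note on `nextOrCleanup` above, which replaces the repeated `cleanupInDefer` blocks in every push middleware — a reduced sketch of the same call-or-cleanup pattern with Mimir's `Request`/`PushFunc` types replaced by stand-ins:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

type pushFunc func(ctx context.Context) error

// nextOrCleanup wraps next so that the returned cleanup function is a no-op
// once next has been called; until then it runs the provided cleanUp.
func nextOrCleanup(next pushFunc, cleanUp func()) (pushFunc, func()) {
	cleanupInDefer := true
	wrapped := func(ctx context.Context) error {
		// Ownership of cleanup passes downstream as soon as next is invoked.
		cleanupInDefer = false
		return next(ctx)
	}
	maybeCleanup := func() {
		if cleanupInDefer {
			cleanUp()
		}
	}
	return wrapped, maybeCleanup
}

func middleware(next pushFunc) pushFunc {
	return func(ctx context.Context) error {
		next, maybeCleanup := nextOrCleanup(next, func() { fmt.Println("cleaned up early") })
		defer maybeCleanup()

		if ctx.Err() != nil {
			// Early return before calling next: the deferred cleanup fires.
			return errors.New("aborted before push")
		}
		// next was called: the downstream handler is now responsible for cleanup.
		return next(ctx)
	}
}

func main() {
	h := middleware(func(context.Context) error { fmt.Println("pushed"); return nil })
	_ = h(context.Background())
}
```

The deferred cleanup is a no-op on the happy path and only fires on the early-return branches, which is exactly what the hand-rolled `cleanupInDefer` flags did before this change.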
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index fbd8632aead..59505e98479 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -1004,7 +1004,7 @@ func TestDistributor_PushHAInstances(t *testing.T) {
testReplica: "instance1234567890123456789012345678901234567890",
cluster: "cluster0",
samples: 5,
- expectedError: status.New(codes.FailedPrecondition, fmt.Sprintf(labelValueTooLongMsgFormat, "instance1234567890123456789012345678901234567890", mimirpb.FromLabelAdaptersToString(labelSetGenWithReplicaAndCluster("instance1234567890123456789012345678901234567890", "cluster0")(0)))),
+ expectedError: status.New(codes.FailedPrecondition, fmt.Sprintf(labelValueTooLongMsgFormat, "__replica__", "instance1234567890123456789012345678901234567890", mimirpb.FromLabelAdaptersToString(labelSetGenWithReplicaAndCluster("instance1234567890123456789012345678901234567890", "cluster0")(0)))),
expectedDetails: &mimirpb.ErrorDetails{Cause: mimirpb.BAD_DATA},
},
} {
@@ -1885,7 +1885,7 @@ func BenchmarkDistributor_Push(b *testing.B) {
expectedErr string
}{
"all samples successfully pushed": {
- prepareConfig: func(limits *validation.Limits) {},
+ prepareConfig: func(*validation.Limits) {},
prepareSeries: func() ([][]mimirpb.LabelAdapter, []mimirpb.Sample) {
metrics := make([][]mimirpb.LabelAdapter, numSeriesPerRequest)
samples := make([]mimirpb.Sample, numSeriesPerRequest)
@@ -2075,7 +2075,7 @@ func BenchmarkDistributor_Push(b *testing.B) {
limits.IngestionRate = float64(rate.Inf) // Unlimited.
testData.prepareConfig(&limits)
- distributorCfg.IngesterClientFactory = ring_client.PoolInstFunc(func(inst ring.InstanceDesc) (ring_client.PoolClient, error) {
+ distributorCfg.IngesterClientFactory = ring_client.PoolInstFunc(func(ring.InstanceDesc) (ring_client.PoolClient, error) {
return &noopIngester{}, nil
})
@@ -4397,7 +4397,7 @@ func TestHaDedupeMiddleware(t *testing.T) {
nextCallCount := 0
var gotReqs []*mimirpb.WriteRequest
- next := func(ctx context.Context, pushReq *Request) error {
+ next := func(_ context.Context, pushReq *Request) error {
nextCallCount++
req, err := pushReq.WriteRequest()
require.NoError(t, err)
@@ -4463,7 +4463,7 @@ func TestInstanceLimitsBeforeHaDedupe(t *testing.T) {
// Capture the submitted write requests which the middlewares pass into the mock push function.
var submittedWriteReqs []*mimirpb.WriteRequest
- mockPush := func(ctx context.Context, pushReq *Request) error {
+ mockPush := func(_ context.Context, pushReq *Request) error {
defer pushReq.CleanUp()
writeReq, err := pushReq.WriteRequest()
require.NoError(t, err)
@@ -4646,7 +4646,7 @@ func TestRelabelMiddleware(t *testing.T) {
}
var gotReqs []*mimirpb.WriteRequest
- next := func(ctx context.Context, pushReq *Request) error {
+ next := func(_ context.Context, pushReq *Request) error {
req, err := pushReq.WriteRequest()
require.NoError(t, err)
gotReqs = append(gotReqs, req)
@@ -4724,7 +4724,7 @@ func TestSortAndFilterMiddleware(t *testing.T) {
}
var gotReqs []*mimirpb.WriteRequest
- next := func(ctx context.Context, pushReq *Request) error {
+ next := func(_ context.Context, pushReq *Request) error {
req, err := pushReq.WriteRequest()
require.NoError(t, err)
gotReqs = append(gotReqs, req)
@@ -6685,7 +6685,7 @@ func TestDistributor_MetricsWithRequestModifications(t *testing.T) {
exemplarLabelGen := func(sampleIdx int) []mimirpb.LabelAdapter {
return []mimirpb.LabelAdapter{{Name: "exemplarLabel", Value: fmt.Sprintf("value_%d", sampleIdx)}}
}
- metaDataGen := func(metricIdx int, metricName string) *mimirpb.MetricMetadata {
+ metaDataGen := func(_ int, metricName string) *mimirpb.MetricMetadata {
return &mimirpb.MetricMetadata{
Type: mimirpb.COUNTER,
MetricFamilyName: metricName,
@@ -7039,7 +7039,7 @@ func TestSeriesAreShardedToCorrectIngesters(t *testing.T) {
exemplarLabelGen := func(sampleIdx int) []mimirpb.LabelAdapter {
return []mimirpb.LabelAdapter{{Name: "exemplarLabel", Value: fmt.Sprintf("value_%d", sampleIdx)}}
}
- metaDataGen := func(metricIdx int, metricName string) *mimirpb.MetricMetadata {
+ metaDataGen := func(_ int, metricName string) *mimirpb.MetricMetadata {
return &mimirpb.MetricMetadata{
Type: mimirpb.COUNTER,
MetricFamilyName: metricName,
@@ -7430,7 +7430,7 @@ func TestSendMessageMetadata(t *testing.T) {
require.NoError(t, err)
mock := &mockInstanceClient{}
- distributorCfg.IngesterClientFactory = ring_client.PoolInstFunc(func(inst ring.InstanceDesc) (ring_client.PoolClient, error) {
+ distributorCfg.IngesterClientFactory = ring_client.PoolInstFunc(func(ring.InstanceDesc) (ring_client.PoolClient, error) {
return mock, nil
})
diff --git a/pkg/distributor/otel_test.go b/pkg/distributor/otel_test.go
index f7039f454a0..41860df6411 100644
--- a/pkg/distributor/otel_test.go
+++ b/pkg/distributor/otel_test.go
@@ -54,7 +54,7 @@ func BenchmarkOTLPHandler(b *testing.B) {
}
exportReq := TimeseriesToOTLPRequest(sampleSeries, sampleMetadata)
- pushFunc := func(ctx context.Context, pushReq *Request) error {
+ pushFunc := func(_ context.Context, pushReq *Request) error {
if _, err := pushReq.WriteRequest(); err != nil {
return err
}
diff --git a/pkg/distributor/push_test.go b/pkg/distributor/push_test.go
index dcbca4a3dbb..0354c657cd5 100644
--- a/pkg/distributor/push_test.go
+++ b/pkg/distributor/push_test.go
@@ -219,7 +219,7 @@ func TestHandlerOTLPPush(t *testing.T) {
maxMsgSize: 30,
series: sampleSeries,
metadata: sampleMetadata,
- verifyFunc: func(t *testing.T, pushReq *Request) error {
+ verifyFunc: func(_ *testing.T, pushReq *Request) error {
_, err := pushReq.WriteRequest()
return err
},
@@ -232,7 +232,7 @@ func TestHandlerOTLPPush(t *testing.T) {
maxMsgSize: 100000,
series: sampleSeries,
metadata: sampleMetadata,
- verifyFunc: func(t *testing.T, pushReq *Request) error {
+ verifyFunc: func(_ *testing.T, pushReq *Request) error {
_, err := pushReq.WriteRequest()
return err
},
@@ -295,7 +295,7 @@ func TestHandlerOTLPPush(t *testing.T) {
validation.NewMockTenantLimits(map[string]*validation.Limits{}),
)
require.NoError(t, err)
- pusher := func(ctx context.Context, pushReq *Request) error {
+ pusher := func(_ context.Context, pushReq *Request) error {
t.Helper()
t.Cleanup(pushReq.CleanUp)
return tt.verifyFunc(t, pushReq)
@@ -361,7 +361,7 @@ func TestHandler_otlpDroppedMetricsPanic(t *testing.T) {
req := createOTLPProtoRequest(t, pmetricotlp.NewExportRequestFromMetrics(md), false)
resp := httptest.NewRecorder()
- handler := OTLPHandler(100000, nil, false, true, limits, RetryConfig{}, nil, func(ctx context.Context, pushReq *Request) error {
+ handler := OTLPHandler(100000, nil, false, true, limits, RetryConfig{}, nil, func(_ context.Context, pushReq *Request) error {
request, err := pushReq.WriteRequest()
assert.NoError(t, err)
assert.Len(t, request.Timeseries, 3)
@@ -407,7 +407,7 @@ func TestHandler_otlpDroppedMetricsPanic2(t *testing.T) {
req := createOTLPProtoRequest(t, pmetricotlp.NewExportRequestFromMetrics(md), false)
resp := httptest.NewRecorder()
- handler := OTLPHandler(100000, nil, false, true, limits, RetryConfig{}, nil, func(ctx context.Context, pushReq *Request) error {
+ handler := OTLPHandler(100000, nil, false, true, limits, RetryConfig{}, nil, func(_ context.Context, pushReq *Request) error {
request, err := pushReq.WriteRequest()
assert.NoError(t, err)
assert.Len(t, request.Timeseries, 2)
@@ -433,7 +433,7 @@ func TestHandler_otlpDroppedMetricsPanic2(t *testing.T) {
req = createOTLPProtoRequest(t, pmetricotlp.NewExportRequestFromMetrics(md), false)
resp = httptest.NewRecorder()
- handler = OTLPHandler(100000, nil, false, true, limits, RetryConfig{}, nil, func(ctx context.Context, pushReq *Request) error {
+ handler = OTLPHandler(100000, nil, false, true, limits, RetryConfig{}, nil, func(_ context.Context, pushReq *Request) error {
request, err := pushReq.WriteRequest()
assert.NoError(t, err)
assert.Len(t, request.Timeseries, 10) // 6 buckets (including +Inf) + 2 sum/count + 2 from the first case
@@ -503,7 +503,7 @@ func TestHandler_EnsureSkipLabelNameValidationBehaviour(t *testing.T) {
name: "config flag set to false means SkipLabelNameValidation is false",
allowSkipLabelNameValidation: false,
req: createRequest(t, createMimirWriteRequestProtobufWithNonSupportedLabelNames(t, false)),
- verifyReqHandler: func(ctx context.Context, pushReq *Request) error {
+ verifyReqHandler: func(_ context.Context, pushReq *Request) error {
request, err := pushReq.WriteRequest()
assert.NoError(t, err)
assert.Len(t, request.Timeseries, 1)
@@ -521,7 +521,7 @@ func TestHandler_EnsureSkipLabelNameValidationBehaviour(t *testing.T) {
name: "config flag set to false means SkipLabelNameValidation is always false even if write requests sets it to true",
allowSkipLabelNameValidation: false,
req: createRequest(t, createMimirWriteRequestProtobufWithNonSupportedLabelNames(t, true)),
- verifyReqHandler: func(ctx context.Context, pushReq *Request) error {
+ verifyReqHandler: func(_ context.Context, pushReq *Request) error {
request, err := pushReq.WriteRequest()
require.NoError(t, err)
t.Cleanup(pushReq.CleanUp)
@@ -539,7 +539,7 @@ func TestHandler_EnsureSkipLabelNameValidationBehaviour(t *testing.T) {
name: "config flag set to true but write request set to false means SkipLabelNameValidation is false",
allowSkipLabelNameValidation: true,
req: createRequest(t, createMimirWriteRequestProtobufWithNonSupportedLabelNames(t, false)),
- verifyReqHandler: func(ctx context.Context, pushReq *Request) error {
+ verifyReqHandler: func(_ context.Context, pushReq *Request) error {
request, err := pushReq.WriteRequest()
assert.NoError(t, err)
assert.Len(t, request.Timeseries, 1)
@@ -556,7 +556,7 @@ func TestHandler_EnsureSkipLabelNameValidationBehaviour(t *testing.T) {
name: "config flag set to true and write request set to true means SkipLabelNameValidation is true",
allowSkipLabelNameValidation: true,
req: createRequest(t, createMimirWriteRequestProtobufWithNonSupportedLabelNames(t, true)),
- verifyReqHandler: func(ctx context.Context, pushReq *Request) error {
+ verifyReqHandler: func(_ context.Context, pushReq *Request) error {
request, err := pushReq.WriteRequest()
assert.NoError(t, err)
assert.Len(t, request.Timeseries, 1)
@@ -573,7 +573,7 @@ func TestHandler_EnsureSkipLabelNameValidationBehaviour(t *testing.T) {
name: "config flag set to true and write request set to true but header not sent means SkipLabelNameValidation is false",
allowSkipLabelNameValidation: true,
req: createRequest(t, createMimirWriteRequestProtobufWithNonSupportedLabelNames(t, true)),
- verifyReqHandler: func(ctx context.Context, pushReq *Request) error {
+ verifyReqHandler: func(_ context.Context, pushReq *Request) error {
request, err := pushReq.WriteRequest()
assert.NoError(t, err)
assert.Len(t, request.Timeseries, 1)
@@ -603,7 +603,7 @@ func TestHandler_EnsureSkipLabelNameValidationBehaviour(t *testing.T) {
func verifyWritePushFunc(t *testing.T, expectSource mimirpb.WriteRequest_SourceEnum) PushFunc {
t.Helper()
- return func(ctx context.Context, pushReq *Request) error {
+ return func(_ context.Context, pushReq *Request) error {
request, err := pushReq.WriteRequest()
require.NoError(t, err)
t.Cleanup(pushReq.CleanUp)
@@ -618,7 +618,7 @@ func verifyWritePushFunc(t *testing.T, expectSource mimirpb.WriteRequest_SourceE
func readBodyPushFunc(t *testing.T) PushFunc {
t.Helper()
- return func(ctx context.Context, req *Request) error {
+ return func(_ context.Context, req *Request) error {
_, err := req.WriteRequest()
return err
}
@@ -706,7 +706,7 @@ func BenchmarkPushHandler(b *testing.B) {
protobuf := createPrometheusRemoteWriteProtobuf(b)
buf := bytes.NewBuffer(snappy.Encode(nil, protobuf))
req := createRequest(b, protobuf)
- pushFunc := func(ctx context.Context, pushReq *Request) error {
+ pushFunc := func(_ context.Context, pushReq *Request) error {
if _, err := pushReq.WriteRequest(); err != nil {
return err
}
@@ -764,7 +764,7 @@ func TestHandler_ErrorTranslation(t *testing.T) {
parserFunc := func(context.Context, *http.Request, int, *util.RequestBuffers, *mimirpb.PreallocWriteRequest, log.Logger) error {
return tc.err
}
- pushFunc := func(ctx context.Context, req *Request) error {
+ pushFunc := func(_ context.Context, req *Request) error {
_, err := req.WriteRequest() // just read the body so we can trigger the parser
return err
}
@@ -831,7 +831,7 @@ func TestHandler_ErrorTranslation(t *testing.T) {
parserFunc := func(context.Context, *http.Request, int, *util.RequestBuffers, *mimirpb.PreallocWriteRequest, log.Logger) error {
return nil
}
- pushFunc := func(ctx context.Context, req *Request) error {
+ pushFunc := func(_ context.Context, req *Request) error {
_, err := req.WriteRequest() // just read the body so we can trigger the parser
if err != nil {
return err
diff --git a/pkg/distributor/validate.go b/pkg/distributor/validate.go
index af2c6e3f82e..1b06e683859 100644
--- a/pkg/distributor/validate.go
+++ b/pkg/distributor/validate.go
@@ -65,7 +65,7 @@ var (
validation.MaxLabelNameLengthFlag,
)
labelValueTooLongMsgFormat = globalerror.SeriesLabelValueTooLong.MessageWithPerTenantLimitConfig(
- "received a series whose label value length exceeds the limit, value: '%.200s' (truncated) series: '%.200s'",
+ "received a series whose label value length exceeds the limit, label: '%s', value: '%.200s' (truncated) series: '%.200s'",
validation.MaxLabelValueLengthFlag,
)
invalidLabelMsgFormat = globalerror.SeriesInvalidLabel.Message("received a series with an invalid label: '%.200s' series: '%.200s'")
@@ -370,7 +370,7 @@ func validateLabels(m *sampleValidationMetrics, cfg labelValidationConfig, userI
return fmt.Errorf(labelNameTooLongMsgFormat, l.Name, mimirpb.FromLabelAdaptersToString(ls))
} else if len(l.Value) > maxLabelValueLength {
m.labelValueTooLong.WithLabelValues(userID, group).Inc()
- return fmt.Errorf(labelValueTooLongMsgFormat, l.Value, mimirpb.FromLabelAdaptersToString(ls))
+ return fmt.Errorf(labelValueTooLongMsgFormat, l.Name, l.Value, mimirpb.FromLabelAdaptersToString(ls))
} else if lastLabelName == l.Name {
m.duplicateLabelNames.WithLabelValues(userID, group).Inc()
return fmt.Errorf(duplicateLabelMsgFormat, l.Name, mimirpb.FromLabelAdaptersToString(ls))
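
For reference, the shape of the error message now that the offending label name is included — the values below are invented purely to show the formatting:

```go
package main

import "fmt"

func main() {
	// Format string as added in validate.go above; the per-tenant limit hint that
	// Mimir appends via MessageWithPerTenantLimitConfig is omitted here.
	const format = "received a series whose label value length exceeds the limit, label: '%s', value: '%.200s' (truncated) series: '%.200s'"
	err := fmt.Errorf(format, "instance", "a-very-long-label-value", `{__name__="up", instance="a-very-long-label-value"}`)
	fmt.Println(err)
}
```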
diff --git a/pkg/distributor/validate_test.go b/pkg/distributor/validate_test.go
index 0508d1b2724..485e0c5f9bf 100644
--- a/pkg/distributor/validate_test.go
+++ b/pkg/distributor/validate_test.go
@@ -123,6 +123,7 @@ func TestValidateLabels(t *testing.T) {
false,
fmt.Errorf(
labelValueTooLongMsgFormat,
+ "much_shorter_name",
"test_value_please_ignore_no_really_nothing_to_see_here",
mimirpb.FromLabelAdaptersToString(
[]mimirpb.LabelAdapter{
diff --git a/pkg/frontend/frontend_test.go b/pkg/frontend/frontend_test.go
index 4f2a9fccc01..44ac58f58be 100644
--- a/pkg/frontend/frontend_test.go
+++ b/pkg/frontend/frontend_test.go
@@ -105,7 +105,7 @@ func TestFrontend_LogsSlowQueriesFormValues(t *testing.T) {
require.NoError(t, err)
downstreamServer := http.Server{
- Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ Handler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
_, err := w.Write([]byte(responseBody))
require.NoError(t, err)
}),
@@ -167,7 +167,7 @@ func TestFrontend_ReturnsRequestBodyTooLargeError(t *testing.T) {
require.NoError(t, err)
downstreamServer := http.Server{
- Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ Handler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
_, err := w.Write([]byte(responseBody))
require.NoError(t, err)
}),
diff --git a/pkg/frontend/querymiddleware/astmapper/subtree_folder_test.go b/pkg/frontend/querymiddleware/astmapper/subtree_folder_test.go
index 3d7d8fab8ff..383dff3a081 100644
--- a/pkg/frontend/querymiddleware/astmapper/subtree_folder_test.go
+++ b/pkg/frontend/querymiddleware/astmapper/subtree_folder_test.go
@@ -22,7 +22,7 @@ func TestEvalPredicate(t *testing.T) {
}{
"should return error if the predicate returns error": {
input: "selector1{} or selector2{}",
- fn: func(node parser.Node) (bool, error) {
+ fn: func(parser.Node) (bool, error) {
return false, errors.New("some err")
},
expectedRes: false,
@@ -30,7 +30,7 @@ func TestEvalPredicate(t *testing.T) {
},
"should return false if the predicate returns false for all nodes in the subtree": {
input: "selector1{} or selector2{}",
- fn: func(node parser.Node) (bool, error) {
+ fn: func(parser.Node) (bool, error) {
return false, nil
},
expectedRes: false,
diff --git a/pkg/frontend/querymiddleware/blocker.go b/pkg/frontend/querymiddleware/blocker.go
index ae03126d289..5981234f18f 100644
--- a/pkg/frontend/querymiddleware/blocker.go
+++ b/pkg/frontend/querymiddleware/blocker.go
@@ -15,7 +15,7 @@ import (
)
type queryBlockerMiddleware struct {
- next Handler
+ next MetricsQueryHandler
limits Limits
logger log.Logger
blockedQueriesCounter *prometheus.CounterVec
@@ -25,12 +25,12 @@ func newQueryBlockerMiddleware(
limits Limits,
logger log.Logger,
registerer prometheus.Registerer,
-) Middleware {
+) MetricsQueryMiddleware {
blockedQueriesCounter := promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{
Name: "cortex_query_frontend_rejected_queries_total",
Help: "Number of queries that were rejected by the cluster administrator.",
}, []string{"user", "reason"})
- return MiddlewareFunc(func(next Handler) Handler {
+ return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
return &queryBlockerMiddleware{
next: next,
limits: limits,
@@ -40,7 +40,7 @@ func newQueryBlockerMiddleware(
})
}
-func (qb *queryBlockerMiddleware) Do(ctx context.Context, req Request) (Response, error) {
+func (qb *queryBlockerMiddleware) Do(ctx context.Context, req MetricsQueryRequest) (Response, error) {
tenants, err := tenant.TenantIDs(ctx)
if err != nil {
return qb.next.Do(ctx, req)
@@ -56,7 +56,7 @@ func (qb *queryBlockerMiddleware) Do(ctx context.Context, req Request) (Response
return qb.next.Do(ctx, req)
}
-func (qb *queryBlockerMiddleware) isBlocked(tenant string, req Request) bool {
+func (qb *queryBlockerMiddleware) isBlocked(tenant string, req MetricsQueryRequest) bool {
blocks := qb.limits.BlockedQueries(tenant)
if len(blocks) <= 0 {
return false
diff --git a/pkg/frontend/querymiddleware/blocker_test.go b/pkg/frontend/querymiddleware/blocker_test.go
index d6530545c98..f52756a35de 100644
--- a/pkg/frontend/querymiddleware/blocker_test.go
+++ b/pkg/frontend/querymiddleware/blocker_test.go
@@ -20,7 +20,7 @@ import (
func Test_queryBlocker_Do(t *testing.T) {
tests := []struct {
name string
- request Request
+ request MetricsQueryRequest
shouldContinue bool
limits mockLimits
}{
@@ -28,7 +28,7 @@ func Test_queryBlocker_Do(t *testing.T) {
name: "doesn't block queries due to empty limits",
limits: mockLimits{},
shouldContinue: true,
- request: Request(&PrometheusRangeQueryRequest{
+ request: MetricsQueryRequest(&PrometheusRangeQueryRequest{
Query: "rate(metric_counter[5m])",
}),
},
@@ -39,7 +39,7 @@ func Test_queryBlocker_Do(t *testing.T) {
{Pattern: "rate(metric_counter[5m])", Regex: false},
},
},
- request: Request(&PrometheusRangeQueryRequest{
+ request: MetricsQueryRequest(&PrometheusRangeQueryRequest{
Query: "rate(metric_counter[5m])",
}),
},
@@ -52,7 +52,7 @@ func Test_queryBlocker_Do(t *testing.T) {
},
shouldContinue: true,
- request: Request(&PrometheusRangeQueryRequest{
+ request: MetricsQueryRequest(&PrometheusRangeQueryRequest{
Query: "rate(metric_counter[15m])",
}),
},
@@ -64,7 +64,7 @@ func Test_queryBlocker_Do(t *testing.T) {
rate(other_counter[5m])`, Regex: false},
},
},
- request: Request(&PrometheusRangeQueryRequest{
+ request: MetricsQueryRequest(&PrometheusRangeQueryRequest{
Query: `rate(metric_counter[5m])/
rate(other_counter[5m])`,
}),
@@ -78,7 +78,7 @@ rate(other_counter[5m])`,
},
shouldContinue: true,
- request: Request(&PrometheusRangeQueryRequest{
+ request: MetricsQueryRequest(&PrometheusRangeQueryRequest{
Query: `rate(metric_counter[15m])/
rate(other_counter[15m])`,
}),
@@ -90,7 +90,7 @@ rate(other_counter[15m])`,
{Pattern: ".*metric_counter.*", Regex: true},
},
},
- request: Request(&PrometheusRangeQueryRequest{
+ request: MetricsQueryRequest(&PrometheusRangeQueryRequest{
Query: "rate(metric_counter[5m])",
}),
},
@@ -102,7 +102,7 @@ rate(other_counter[15m])`,
{Pattern: "(?s).*metric_counter.*", Regex: true},
},
},
- request: Request(&PrometheusRangeQueryRequest{
+ request: MetricsQueryRequest(&PrometheusRangeQueryRequest{
Query: `rate(other_counter[15m])/
rate(metric_counter[15m])`,
}),
@@ -116,7 +116,7 @@ rate(other_counter[15m])`,
},
shouldContinue: true,
- request: Request(&PrometheusRangeQueryRequest{
+ request: MetricsQueryRequest(&PrometheusRangeQueryRequest{
Query: "rate(metric_counter[5m])",
}),
},
@@ -150,7 +150,7 @@ type mockNextHandler struct {
shouldContinue bool
}
-func (h *mockNextHandler) Do(_ context.Context, _ Request) (Response, error) {
+func (h *mockNextHandler) Do(_ context.Context, _ MetricsQueryRequest) (Response, error) {
if !h.shouldContinue {
h.t.Error("The next middleware should not be called.")
}
diff --git a/pkg/frontend/querymiddleware/cardinality.go b/pkg/frontend/querymiddleware/cardinality.go
index 2d9def88ffe..661caca372e 100644
--- a/pkg/frontend/querymiddleware/cardinality.go
+++ b/pkg/frontend/querymiddleware/cardinality.go
@@ -34,23 +34,23 @@ const (
cacheErrorToleranceFraction = 0.1
)
-// cardinalityEstimation is a Handler that caches estimates for a query's
+// cardinalityEstimation is a MetricsQueryHandler that caches estimates for a query's
// cardinality based on similar queries seen previously.
type cardinalityEstimation struct {
cache cache.Cache
- next Handler
+ next MetricsQueryHandler
logger log.Logger
estimationError prometheus.Histogram
}
-func newCardinalityEstimationMiddleware(cache cache.Cache, logger log.Logger, registerer prometheus.Registerer) Middleware {
+func newCardinalityEstimationMiddleware(cache cache.Cache, logger log.Logger, registerer prometheus.Registerer) MetricsQueryMiddleware {
estimationError := promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{
Name: "cortex_query_frontend_cardinality_estimation_difference",
Help: "Difference between estimated and actual query cardinality",
Buckets: prometheus.ExponentialBuckets(100, 2, 10),
})
- return MiddlewareFunc(func(next Handler) Handler {
+ return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
return &cardinalityEstimation{
cache: cache,
next: next,
@@ -63,7 +63,7 @@ func newCardinalityEstimationMiddleware(cache cache.Cache, logger log.Logger, re
// Do injects a cardinality estimate into the query hints (if available) and
// caches the actual cardinality observed for this query.
-func (c *cardinalityEstimation) Do(ctx context.Context, request Request) (Response, error) {
+func (c *cardinalityEstimation) Do(ctx context.Context, request MetricsQueryRequest) (Response, error) {
spanLog := spanlogger.FromContext(ctx, c.logger)
tenants, err := tenant.TenantIDs(ctx)
@@ -162,7 +162,7 @@ func isCardinalitySimilar(actualCardinality, estimatedCardinality uint64) bool {
// with respect to both start time and range size. To avoid expiry of all
// estimates at the bucket boundary, an offset is added based on the hash of the
// query string.
-func generateCardinalityEstimationCacheKey(userID string, r Request, bucketSize time.Duration) string {
+func generateCardinalityEstimationCacheKey(userID string, r MetricsQueryRequest, bucketSize time.Duration) string {
hasher := fnv.New64a()
_, _ = hasher.Write([]byte(r.GetQuery()))
diff --git a/pkg/frontend/querymiddleware/cardinality_query_cache.go b/pkg/frontend/querymiddleware/cardinality_query_cache.go
index 12108723221..1d059cd38bc 100644
--- a/pkg/frontend/querymiddleware/cardinality_query_cache.go
+++ b/pkg/frontend/querymiddleware/cardinality_query_cache.go
@@ -3,10 +3,8 @@
package querymiddleware
import (
- "context"
"errors"
"net/http"
- "net/url"
"strings"
"time"
@@ -14,7 +12,9 @@ import (
"github.com/grafana/dskit/cache"
"github.com/prometheus/client_golang/prometheus"
+ apierror "github.com/grafana/mimir/pkg/api/error"
"github.com/grafana/mimir/pkg/cardinality"
+ "github.com/grafana/mimir/pkg/util"
)
const (
@@ -39,10 +39,15 @@ func (c *cardinalityQueryTTL) ttl(userID string) time.Duration {
return c.limits.ResultsCacheTTLForCardinalityQuery(userID)
}
-func (DefaultCacheKeyGenerator) LabelValuesCardinality(_ context.Context, path string, values url.Values) (*GenericQueryCacheKey, error) {
+func (DefaultCacheKeyGenerator) LabelValuesCardinality(r *http.Request) (*GenericQueryCacheKey, error) {
+ reqValues, err := util.ParseRequestFormWithoutConsumingBody(r)
+ if err != nil {
+ return nil, apierror.New(apierror.TypeBadData, err.Error())
+ }
+
switch {
- case strings.HasSuffix(path, cardinalityLabelNamesPathSuffix):
- parsed, err := cardinality.DecodeLabelNamesRequestFromValues(values)
+ case strings.HasSuffix(r.URL.Path, cardinalityLabelNamesPathSuffix):
+ parsed, err := cardinality.DecodeLabelNamesRequestFromValues(reqValues)
if err != nil {
return nil, err
}
@@ -51,8 +56,8 @@ func (DefaultCacheKeyGenerator) LabelValuesCardinality(_ context.Context, path s
CacheKey: parsed.String(),
CacheKeyPrefix: cardinalityLabelNamesQueryCachePrefix,
}, nil
- case strings.HasSuffix(path, cardinalityLabelValuesPathSuffix):
- parsed, err := cardinality.DecodeLabelValuesRequestFromValues(values)
+ case strings.HasSuffix(r.URL.Path, cardinalityLabelValuesPathSuffix):
+ parsed, err := cardinality.DecodeLabelValuesRequestFromValues(reqValues)
if err != nil {
return nil, err
}
@@ -61,8 +66,8 @@ func (DefaultCacheKeyGenerator) LabelValuesCardinality(_ context.Context, path s
CacheKey: parsed.String(),
CacheKeyPrefix: cardinalityLabelValuesQueryCachePrefix,
}, nil
- case strings.HasSuffix(path, cardinalityActiveSeriesPathSuffix):
- parsed, err := cardinality.DecodeActiveSeriesRequestFromValues(values)
+ case strings.HasSuffix(r.URL.Path, cardinalityActiveSeriesPathSuffix):
+ parsed, err := cardinality.DecodeActiveSeriesRequestFromValues(reqValues)
if err != nil {
return nil, err
}
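With this change the cardinality cache key generator receives the raw *http.Request and parses the form itself via util.ParseRequestFormWithoutConsumingBody, instead of being handed pre-parsed url.Values. A rough sketch of exercising it from inside the querymiddleware package (the endpoint path and function name are illustrative assumptions; the zero-value DefaultCacheKeyGenerator suffices here because this method does not touch the codec field):

func exampleCardinalityCacheKey() (*GenericQueryCacheKey, error) {
	// Hypothetical cardinality label-names request; the body is never consumed.
	r, err := http.NewRequest(http.MethodGet, "/prometheus/api/v1/cardinality/label_names?limit=20", nil)
	if err != nil {
		return nil, err
	}
	return DefaultCacheKeyGenerator{}.LabelValuesCardinality(r)
}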
diff --git a/pkg/frontend/querymiddleware/cardinality_query_cache_test.go b/pkg/frontend/querymiddleware/cardinality_query_cache_test.go
index 1af331906f0..89c5f2d6401 100644
--- a/pkg/frontend/querymiddleware/cardinality_query_cache_test.go
+++ b/pkg/frontend/querymiddleware/cardinality_query_cache_test.go
@@ -46,7 +46,7 @@ func TestCardinalityQueryCache_RoundTrip_WithTenantFederation(t *testing.T) {
for testName, testData := range tests {
t.Run(testName, func(t *testing.T) {
// Mock the downstream.
- downstream := RoundTripFunc(func(request *http.Request) (*http.Response, error) {
+ downstream := RoundTripFunc(func(*http.Request) (*http.Response, error) {
return &http.Response{
StatusCode: 200,
Body: io.NopCloser(strings.NewReader("{}")),
diff --git a/pkg/frontend/querymiddleware/cardinality_test.go b/pkg/frontend/querymiddleware/cardinality_test.go
index 5b78e5972ee..ff1738de83b 100644
--- a/pkg/frontend/querymiddleware/cardinality_test.go
+++ b/pkg/frontend/querymiddleware/cardinality_test.go
@@ -29,7 +29,7 @@ func Test_cardinalityEstimateBucket_QueryRequest_keyFormat(t *testing.T) {
tests := []struct {
name string
userID string
- r Request
+ r MetricsQueryRequest
expected string
}{
{
@@ -133,7 +133,7 @@ func Test_cardinalityEstimation_Do(t *testing.T) {
Query: "up",
}
addSeriesHandler := func(estimate, actual uint64) HandlerFunc {
- return func(ctx context.Context, request Request) (Response, error) {
+ return func(ctx context.Context, request MetricsQueryRequest) (Response, error) {
require.NotNil(t, request.GetHints())
request.GetHints().GetCardinalityEstimate()
require.Equal(t, request.GetHints().GetEstimatedSeriesCount(), estimate)
@@ -158,7 +158,7 @@ func Test_cardinalityEstimation_Do(t *testing.T) {
{
name: "no tenantID",
tenantID: "",
- downstreamHandler: func(_ context.Context, _ Request) (Response, error) {
+ downstreamHandler: func(_ context.Context, _ MetricsQueryRequest) (Response, error) {
return &PrometheusResponse{}, nil
},
expectedLoads: 0,
@@ -168,7 +168,7 @@ func Test_cardinalityEstimation_Do(t *testing.T) {
{
name: "downstream error",
tenantID: "1",
- downstreamHandler: func(_ context.Context, _ Request) (Response, error) {
+ downstreamHandler: func(_ context.Context, _ MetricsQueryRequest) (Response, error) {
return nil, errors.New("test error")
},
expectedLoads: 1,
@@ -205,7 +205,7 @@ func Test_cardinalityEstimation_Do(t *testing.T) {
{
name: "with empty cache",
tenantID: "1",
- downstreamHandler: func(ctx context.Context, request Request) (Response, error) {
+ downstreamHandler: func(ctx context.Context, _ MetricsQueryRequest) (Response, error) {
queryStats := stats.FromContext(ctx)
queryStats.AddFetchedSeries(numSeries)
return &PrometheusResponse{}, nil
@@ -251,8 +251,8 @@ func Test_cardinalityEstimateBucket_QueryRequest_requestEquality(t *testing.T) {
name string
tenantA string
tenantB string
- requestA Request
- requestB Request
+ requestA MetricsQueryRequest
+ requestB MetricsQueryRequest
expectedEqual bool
}{
{
diff --git a/pkg/frontend/querymiddleware/codec.go b/pkg/frontend/querymiddleware/codec.go
index 056bda16c1f..764822ac478 100644
--- a/pkg/frontend/querymiddleware/codec.go
+++ b/pkg/frontend/querymiddleware/codec.go
@@ -62,17 +62,21 @@ const (
formatProtobuf = "protobuf"
)
-// Codec is used to encode/decode query range requests and responses so they can be passed down to middlewares.
+// Codec is used to encode/decode query requests and responses so they can be passed down to middlewares.
type Codec interface {
Merger
- // DecodeRequest decodes a Request from an http request.
- DecodeRequest(context.Context, *http.Request) (Request, error)
+ // DecodeMetricsQueryRequest decodes a MetricsQueryRequest from an http request.
+ DecodeMetricsQueryRequest(context.Context, *http.Request) (MetricsQueryRequest, error)
+ // DecodeLabelsQueryRequest decodes a LabelsQueryRequest from an http request.
+ DecodeLabelsQueryRequest(context.Context, *http.Request) (LabelsQueryRequest, error)
// DecodeResponse decodes a Response from an http response.
// The original request is also passed as a parameter; this is useful for implementations that need the request
// to merge or build the result correctly.
- DecodeResponse(context.Context, *http.Response, Request, log.Logger) (Response, error)
- // EncodeRequest encodes a Request into an http request.
- EncodeRequest(context.Context, Request) (*http.Request, error)
+ DecodeResponse(context.Context, *http.Response, MetricsQueryRequest, log.Logger) (Response, error)
+ // EncodeMetricsQueryRequest encodes a MetricsQueryRequest into an http request.
+ EncodeMetricsQueryRequest(context.Context, MetricsQueryRequest) (*http.Request, error)
+ // EncodeLabelsQueryRequest encodes a LabelsQueryRequest into an http request.
+ EncodeLabelsQueryRequest(context.Context, LabelsQueryRequest) (*http.Request, error)
// EncodeResponse encodes a Response into an http response.
EncodeResponse(context.Context, *http.Request, Response) (*http.Response, error)
}
@@ -83,9 +87,9 @@ type Merger interface {
MergeResponse(...Response) (Response, error)
}
-// Request represents a query range request that can be process by middlewares.
-type Request interface {
- // GetId returns the ID of the request used by splitAndCacheMiddleware to correlate downstream requests and responses.
+// MetricsQueryRequest represents an instant or range query request that can be processed by middlewares.
+type MetricsQueryRequest interface {
+ // GetId returns the ID of the request used to correlate downstream requests and responses.
GetId() int64
// GetStart returns the start timestamp of the request in milliseconds.
GetStart() int64
@@ -101,20 +105,45 @@ type Request interface {
// These hints can be used to optimize the query execution.
GetHints() *Hints
// WithID clones the current request with the provided ID.
- WithID(id int64) Request
+ WithID(id int64) MetricsQueryRequest
// WithStartEnd clone the current request with different start and end timestamp.
- WithStartEnd(startTime int64, endTime int64) Request
+ WithStartEnd(startTime int64, endTime int64) MetricsQueryRequest
// WithQuery clone the current request with a different query.
- WithQuery(string) Request
+ WithQuery(string) MetricsQueryRequest
// WithTotalQueriesHint adds the number of total queries to this request's Hints.
- WithTotalQueriesHint(int32) Request
+ WithTotalQueriesHint(int32) MetricsQueryRequest
// WithEstimatedSeriesCountHint WithEstimatedCardinalityHint adds a cardinality estimate to this request's Hints.
- WithEstimatedSeriesCountHint(uint64) Request
+ WithEstimatedSeriesCountHint(uint64) MetricsQueryRequest
proto.Message
// AddSpanTags writes information about this request to an OpenTracing span
AddSpanTags(opentracing.Span)
}
+// LabelsQueryRequest represents a label names or label values query request that can be processed by middlewares.
+type LabelsQueryRequest interface {
+ proto.Message
+ // GetLabelName returns the label name param from a Label Values request `/api/v1/label/<label_name>/values`,
+ // or an empty string for a Label Names request `/api/v1/labels`.
+ GetLabelName() string
+ // GetStart returns the start timestamp of the request in milliseconds
+ GetStart() int64
+ // GetStartOrDefault returns the start timestamp of the request in milliseconds,
+ // or the Prometheus v1 API MinTime if no start timestamp was provided on the original request.
+ GetStartOrDefault() int64
+ // GetEnd returns the end timestamp of the request in milliseconds
+ GetEnd() int64
+ // GetEndOrDefault returns the end timestamp of the request in milliseconds,
+ // or the Prometheus v1 API MaxTime if no end timestamp was provided on the original request.
+ GetEndOrDefault() int64
+ // GetLabelMatcherSets returns the label matchers a.k.a series selectors for Prometheus label query requests,
+ // as retained in their original string format. This enables the request to be symmetrically decoded and encoded
+ // to and from the http request format without needing to undo the Prometheus parser converting between formats
+ // like `up{job="prometheus"}` and `{__name__="up", job="prometheus"}`, or other idiosyncrasies.
+ GetLabelMatcherSets() []string
+ // AddSpanTags writes information about this request to an OpenTracing span
+ AddSpanTags(opentracing.Span)
+}
+
// Response represents a query range response.
type Response interface {
proto.Message
@@ -217,21 +246,26 @@ func (prometheusCodec) MergeResponse(responses ...Response) (Response, error) {
}, nil
}
-func (c prometheusCodec) DecodeRequest(_ context.Context, r *http.Request) (Request, error) {
+func (c prometheusCodec) DecodeMetricsQueryRequest(_ context.Context, r *http.Request) (MetricsQueryRequest, error) {
switch {
case IsRangeQuery(r.URL.Path):
return c.decodeRangeQueryRequest(r)
case IsInstantQuery(r.URL.Path):
return c.decodeInstantQueryRequest(r)
default:
- return nil, fmt.Errorf("prometheus codec doesn't support requests to %s", r.URL.Path)
+ return nil, fmt.Errorf("unknown metrics query API endpoint %s", r.URL.Path)
}
}
-func (prometheusCodec) decodeRangeQueryRequest(r *http.Request) (Request, error) {
+func (prometheusCodec) decodeRangeQueryRequest(r *http.Request) (MetricsQueryRequest, error) {
var result PrometheusRangeQueryRequest
var err error
- result.Start, result.End, result.Step, err = DecodeRangeQueryTimeParams(r)
+ reqValues, err := util.ParseRequestFormWithoutConsumingBody(r)
+ if err != nil {
+ return nil, apierror.New(apierror.TypeBadData, err.Error())
+ }
+
+ result.Start, result.End, result.Step, err = DecodeRangeQueryTimeParams(&reqValues)
if err != nil {
return nil, err
}
@@ -242,29 +276,68 @@ func (prometheusCodec) decodeRangeQueryRequest(r *http.Request) (Request, error)
return &result, nil
}
-func (c prometheusCodec) decodeInstantQueryRequest(r *http.Request) (Request, error) {
+func (c prometheusCodec) decodeInstantQueryRequest(r *http.Request) (MetricsQueryRequest, error) {
var result PrometheusInstantQueryRequest
var err error
- result.Time, err = DecodeInstantQueryTimeParams(r, time.Now)
+ reqValues, err := util.ParseRequestFormWithoutConsumingBody(r)
+ if err != nil {
+ return nil, apierror.New(apierror.TypeBadData, err.Error())
+ }
+
+ result.Time, err = DecodeInstantQueryTimeParams(&reqValues, time.Now)
if err != nil {
return nil, decorateWithParamName(err, "time")
}
- result.Query = r.FormValue("query")
+ result.Query = reqValues.Get("query")
result.Path = r.URL.Path
decodeOptions(r, &result.Options)
return &result, nil
}
+func (prometheusCodec) DecodeLabelsQueryRequest(_ context.Context, r *http.Request) (LabelsQueryRequest, error) {
+ if !IsLabelsQuery(r.URL.Path) {
+ return nil, fmt.Errorf("unknown labels query API endpoint %s", r.URL.Path)
+ }
+
+ reqValues, err := util.ParseRequestFormWithoutConsumingBody(r)
+ if err != nil {
+ return nil, apierror.New(apierror.TypeBadData, err.Error())
+ }
+ start, end, err := DecodeLabelsQueryTimeParams(&reqValues, false)
+ if err != nil {
+ return nil, err
+ }
+
+ labelMatcherSets := reqValues["match[]"]
+
+ if IsLabelNamesQuery(r.URL.Path) {
+ return &PrometheusLabelNamesQueryRequest{
+ Path: r.URL.Path,
+ Start: start,
+ End: end,
+ LabelMatcherSets: labelMatcherSets,
+ }, nil
+ }
+ // else, must be Label Values Request due to IsLabelsQuery check at beginning of func
+ return &PrometheusLabelValuesQueryRequest{
+ Path: r.URL.Path,
+ LabelName: labelValuesPathSuffix.FindStringSubmatch(r.URL.Path)[1],
+ Start: start,
+ End: end,
+ LabelMatcherSets: labelMatcherSets,
+ }, nil
+}
+
// DecodeRangeQueryTimeParams encapsulates Prometheus range query time param parsing,
// emulating the logic in prometheus/prometheus/web/api/v1#API.query_range.
-func DecodeRangeQueryTimeParams(r *http.Request) (start, end, step int64, err error) {
- start, err = util.ParseTime(r.FormValue("start"))
+func DecodeRangeQueryTimeParams(reqValues *url.Values) (start, end, step int64, err error) {
+ start, err = util.ParseTime(reqValues.Get("start"))
if err != nil {
return 0, 0, 0, decorateWithParamName(err, "start")
}
- end, err = util.ParseTime(r.FormValue("end"))
+ end, err = util.ParseTime(reqValues.Get("end"))
if err != nil {
return 0, 0, 0, decorateWithParamName(err, "end")
}
@@ -273,7 +346,7 @@ func DecodeRangeQueryTimeParams(r *http.Request) (start, end, step int64, err er
return 0, 0, 0, errEndBeforeStart
}
- step, err = parseDurationMs(r.FormValue("step"))
+ step, err = parseDurationMs(reqValues.Get("step"))
if err != nil {
return 0, 0, 0, decorateWithParamName(err, "step")
}
@@ -293,32 +366,61 @@ func DecodeRangeQueryTimeParams(r *http.Request) (start, end, step int64, err er
// DecodeInstantQueryTimeParams encapsulates Prometheus instant query time param parsing,
// emulating the logic in prometheus/prometheus/web/api/v1#API.query.
-func DecodeInstantQueryTimeParams(r *http.Request, now func() time.Time) (int64, error) {
- time, err := util.ParseTimeParam(r, "time", now().UnixMilli())
- if err != nil {
- return 0, decorateWithParamName(err, "time")
+func DecodeInstantQueryTimeParams(reqValues *url.Values, defaultNow func() time.Time) (time int64, err error) {
+ timeVal := reqValues.Get("time")
+ if timeVal == "" {
+ time = defaultNow().UnixMilli()
+ } else {
+ time, err = util.ParseTime(timeVal)
+ if err != nil {
+ return 0, decorateWithParamName(err, "time")
+ }
}
- return time, nil
+
+ return time, err
}
-// DecodeLabelsQueryTimeParams encapsulates Prometheus label names query time param parsing,
+// DecodeLabelsQueryTimeParams encapsulates Prometheus label names and label values query time param parsing,
// emulating the logic in prometheus/prometheus/web/api/v1#API.labelNames and v1#API.labelValues.
-func DecodeLabelsQueryTimeParams(r *http.Request) (start, end int64, err error) {
- start, err = util.ParseTimeParam(r, "start", v1.MinTime.UnixMilli())
- if err != nil {
- return 0, 0, decorateWithParamName(err, "start")
+//
+// Setting `usePromDefaults` true will set missing timestamp params to the Prometheus default
+// min and max query timestamps; false will default to 0 for missing timestamp params.
+func DecodeLabelsQueryTimeParams(reqValues *url.Values, usePromDefaults bool) (start, end int64, err error) {
+ var defaultStart, defaultEnd int64
+ if usePromDefaults {
+ defaultStart = v1.MinTime.UnixMilli()
+ defaultEnd = v1.MaxTime.UnixMilli()
}
- end, err = util.ParseTimeParam(r, "end", v1.MaxTime.UnixMilli())
if err != nil {
- return 0, 0, decorateWithParamName(err, "end")
+ return 0, 0, apierror.New(apierror.TypeBadData, err.Error())
}
- if end < start {
+ startVal := reqValues.Get("start")
+ if startVal == "" {
+ start = defaultStart
+ } else {
+ start, err = util.ParseTime(startVal)
+ if err != nil {
+ return 0, 0, decorateWithParamName(err, "start")
+ }
+ }
+
+ endVal := reqValues.Get("end")
+ if endVal == "" {
+ end = defaultEnd
+ } else {
+ end, err = util.ParseTime(endVal)
+ if err != nil {
+ return 0, 0, decorateWithParamName(err, "end")
+ }
+ }
+
+ if endVal != "" && end < start {
return 0, 0, errEndBeforeStart
}
- return start, end, nil
+ return start, end, err
}
func decodeOptions(r *http.Request, opts *Options) {
@@ -358,7 +460,7 @@ func decodeCacheDisabledOption(r *http.Request) bool {
return false
}
-func (c prometheusCodec) EncodeRequest(ctx context.Context, r Request) (*http.Request, error) {
+func (c prometheusCodec) EncodeMetricsQueryRequest(ctx context.Context, r MetricsQueryRequest) (*http.Request, error) {
var u *url.URL
switch r := r.(type) {
case *PrometheusRangeQueryRequest:
@@ -379,6 +481,7 @@ func (c prometheusCodec) EncodeRequest(ctx context.Context, r Request) (*http.Re
"query": []string{r.Query},
}.Encode(),
}
+
default:
return nil, fmt.Errorf("unsupported request type %T", r)
}
@@ -409,6 +512,70 @@ func (c prometheusCodec) EncodeRequest(ctx context.Context, r Request) (*http.Re
return req.WithContext(ctx), nil
}
+func (c prometheusCodec) EncodeLabelsQueryRequest(ctx context.Context, req LabelsQueryRequest) (*http.Request, error) {
+ var u *url.URL
+ switch req := req.(type) {
+ case *PrometheusLabelNamesQueryRequest:
+ urlValues := url.Values{}
+ if req.GetStart() != 0 {
+ urlValues["start"] = []string{encodeTime(req.Start)}
+ }
+ if req.GetEnd() != 0 {
+ urlValues["end"] = []string{encodeTime(req.End)}
+ }
+ if len(req.GetLabelMatcherSets()) > 0 {
+ urlValues["match[]"] = req.GetLabelMatcherSets()
+ }
+ u = &url.URL{
+ Path: req.Path,
+ RawQuery: urlValues.Encode(),
+ }
+ case *PrometheusLabelValuesQueryRequest:
+ // repeated from the PrometheusLabelNamesQueryRequest case; a Go type switch
+ // does not allow accessing struct members when a single case lists multiple types
+ urlValues := url.Values{}
+ if req.GetStart() != 0 {
+ urlValues["start"] = []string{encodeTime(req.Start)}
+ }
+ if req.GetEnd() != 0 {
+ urlValues["end"] = []string{encodeTime(req.End)}
+ }
+ if len(req.GetLabelMatcherSets()) > 0 {
+ urlValues["match[]"] = req.GetLabelMatcherSets()
+ }
+ u = &url.URL{
+ Path: req.Path, // path still contains label name
+ RawQuery: urlValues.Encode(),
+ }
+
+ default:
+ return nil, fmt.Errorf("unsupported request type %T", req)
+ }
+
+ r := &http.Request{
+ Method: "GET",
+ RequestURI: u.String(), // This is what the httpgrpc code looks at.
+ URL: u,
+ Body: http.NoBody,
+ Header: http.Header{},
+ }
+
+ switch c.preferredQueryResultResponseFormat {
+ case formatJSON:
+ r.Header.Set("Accept", jsonMimeType)
+ case formatProtobuf:
+ r.Header.Set("Accept", mimirpb.QueryResponseMimeType+","+jsonMimeType)
+ default:
+ return nil, fmt.Errorf("unknown query result response format '%s'", c.preferredQueryResultResponseFormat)
+ }
+
+ if consistency, ok := api.ReadConsistencyFromContext(ctx); ok {
+ r.Header.Add(api.ReadConsistencyHeader, consistency)
+ }
+
+ return r.WithContext(ctx), nil
+}
+
func encodeOptions(req *http.Request, o Options) {
if o.CacheDisabled {
req.Header.Set(cacheControlHeader, noStoreValue)
@@ -427,7 +594,7 @@ func encodeOptions(req *http.Request, o Options) {
}
}
-func (c prometheusCodec) DecodeResponse(ctx context.Context, r *http.Response, _ Request, logger log.Logger) (Response, error) {
+func (c prometheusCodec) DecodeResponse(ctx context.Context, r *http.Response, _ MetricsQueryRequest, logger log.Logger) (Response, error) {
switch r.StatusCode {
case http.StatusServiceUnavailable:
return nil, apierror.New(apierror.TypeUnavailable, string(mustReadResponseBody(r)))
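The new DecodeLabelsQueryTimeParams deliberately leaves missing timestamps at 0 when usePromDefaults is false, deferring the Prometheus v1 API MinTime/MaxTime defaults to the request's GetStartOrDefault/GetEndOrDefault getters. A small sketch of that behaviour, assumed to compile inside the querymiddleware package (the function name is invented for illustration):

func exampleLabelsTimeParams() (start, end int64, err error) {
	// "end" is intentionally absent from the parsed form values.
	values := url.Values{"start": []string{"1708502400"}}
	start, end, err = DecodeLabelsQueryTimeParams(&values, false)
	// On success: start == 1708502400000 (milliseconds), end == 0 until
	// GetEndOrDefault substitutes the Prometheus v1 API MaxTime.
	return start, end, err
}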
diff --git a/pkg/frontend/querymiddleware/codec_test.go b/pkg/frontend/querymiddleware/codec_test.go
index 963ef555b6e..ca99d08ecd1 100644
--- a/pkg/frontend/querymiddleware/codec_test.go
+++ b/pkg/frontend/querymiddleware/codec_test.go
@@ -13,6 +13,7 @@ import (
"io"
"math/rand"
"net/http"
+ "net/url"
"strconv"
"strings"
"testing"
@@ -21,10 +22,11 @@ import (
"github.com/go-kit/log"
"github.com/grafana/dskit/user"
jsoniter "github.com/json-iterator/go"
- v1 "github.com/prometheus/client_golang/api/prometheus/v1"
+ v1Client "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/histogram"
+ v1API "github.com/prometheus/prometheus/web/api/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -37,12 +39,12 @@ var (
matrix = model.ValMatrix.String()
)
-func TestRequest(t *testing.T) {
+func TestMetricsQueryRequest(t *testing.T) {
codec := newTestPrometheusCodec()
for i, tc := range []struct {
url string
- expected Request
+ expected MetricsQueryRequest
expectedErr error
}{
{
@@ -95,26 +97,192 @@ func TestRequest(t *testing.T) {
ctx := user.InjectOrgID(context.Background(), "1")
r = r.WithContext(ctx)
- req, err := codec.DecodeRequest(ctx, r)
+ req, err := codec.DecodeMetricsQueryRequest(ctx, r)
if err != nil || tc.expectedErr != nil {
require.EqualValues(t, tc.expectedErr, err)
return
}
require.EqualValues(t, tc.expected, req)
- rdash, err := codec.EncodeRequest(context.Background(), req)
+ rdash, err := codec.EncodeMetricsQueryRequest(context.Background(), req)
require.NoError(t, err)
require.EqualValues(t, tc.url, rdash.RequestURI)
})
}
}
+func TestLabelsQueryRequest(t *testing.T) {
+ codec := newTestPrometheusCodec()
+
+ for _, testCase := range []struct {
+ name string
+ url string
+ expectedStruct LabelsQueryRequest
+ expectedGetLabelName string
+ expectedGetStartOrDefault int64
+ expectedGetEndOrDefault int64
+ expectedErr error
+ }{
+ {
+ name: "label names with start and end timestamps, no matcher sets",
+ url: "/api/v1/labels?end=1708588800&start=1708502400",
+ expectedStruct: &PrometheusLabelNamesQueryRequest{
+ Path: "/api/v1/labels",
+ Start: 1708502400 * 1e3,
+ End: 1708588800 * 1e3,
+ LabelMatcherSets: nil,
+ },
+ expectedGetLabelName: "",
+ expectedGetStartOrDefault: 1708502400 * 1e3,
+ expectedGetEndOrDefault: 1708588800 * 1e3,
+ },
+ {
+ name: "label values with start and end timestamps, no matcher sets",
+ url: "/api/v1/label/job/values?end=1708588800&start=1708502400",
+ expectedStruct: &PrometheusLabelValuesQueryRequest{
+ Path: "/api/v1/label/job/values",
+ LabelName: "job",
+ Start: 1708502400 * 1e3,
+ End: 1708588800 * 1e3,
+ LabelMatcherSets: nil,
+ },
+ expectedGetLabelName: "",
+ expectedGetStartOrDefault: 1708502400 * 1e3,
+ expectedGetEndOrDefault: 1708588800 * 1e3,
+ },
+ {
+ name: "label names with start timestamp, no end timestamp, no matcher sets",
+ url: "/api/v1/labels?start=1708502400",
+ expectedStruct: &PrometheusLabelNamesQueryRequest{
+ Path: "/api/v1/labels",
+ Start: 1708502400 * 1e3,
+ End: 0,
+ LabelMatcherSets: nil,
+ },
+ expectedGetLabelName: "",
+ expectedGetStartOrDefault: 1708502400 * 1e3,
+ expectedGetEndOrDefault: v1API.MaxTime.UnixMilli(),
+ },
+ {
+ name: "label values with start timestamp, no end timestamp, no matcher sets",
+ url: "/api/v1/label/job/values?start=1708502400",
+ expectedStruct: &PrometheusLabelValuesQueryRequest{
+ Path: "/api/v1/label/job/values",
+ LabelName: "job",
+ Start: 1708502400 * 1e3,
+ End: 0,
+ LabelMatcherSets: nil,
+ },
+ expectedGetLabelName: "job",
+ expectedGetStartOrDefault: 1708502400 * 1e3,
+ expectedGetEndOrDefault: v1API.MaxTime.UnixMilli(),
+ },
+ {
+ name: "label names with end timestamp, no start timestamp, no matcher sets",
+ url: "/api/v1/labels?end=1708588800",
+ expectedStruct: &PrometheusLabelNamesQueryRequest{
+ Path: "/api/v1/labels",
+ Start: 0,
+ End: 1708588800 * 1e3,
+ LabelMatcherSets: nil,
+ },
+ expectedGetLabelName: "",
+ expectedGetStartOrDefault: v1API.MinTime.UnixMilli(),
+ expectedGetEndOrDefault: 1708588800 * 1e3,
+ },
+ {
+ name: "label values with end timestamp, no start timestamp, no matcher sets",
+ url: "/api/v1/label/job/values?end=1708588800",
+ expectedStruct: &PrometheusLabelValuesQueryRequest{
+ Path: "/api/v1/label/job/values",
+ LabelName: "job",
+ Start: 0,
+ End: 1708588800 * 1e3,
+ LabelMatcherSets: nil,
+ },
+ expectedGetLabelName: "job",
+ expectedGetStartOrDefault: v1API.MinTime.UnixMilli(),
+ expectedGetEndOrDefault: 1708588800 * 1e3,
+ },
+ {
+ name: "label names with start timestamp, no end timestamp, multiple matcher sets",
+ url: "/api/v1/labels?end=1708588800&match%5B%5D=go_goroutines%7Bcontainer%3D~%22quer.%2A%22%7D&match%5B%5D=go_goroutines%7Bcontainer%21%3D%22query-scheduler%22%7D&start=1708502400",
+ expectedStruct: &PrometheusLabelNamesQueryRequest{
+ Path: "/api/v1/labels",
+ Start: 1708502400 * 1e3,
+ End: 1708588800 * 1e3,
+ LabelMatcherSets: []string{
+ "go_goroutines{container=~\"quer.*\"}",
+ "go_goroutines{container!=\"query-scheduler\"}",
+ },
+ },
+ expectedGetLabelName: "",
+ expectedGetStartOrDefault: 1708502400 * 1e3,
+ expectedGetEndOrDefault: 1708588800 * 1e3,
+ },
+ {
+ name: "label values with start timestamp, no end timestamp, multiple matcher sets",
+ url: "/api/v1/label/job/values?end=1708588800&match%5B%5D=go_goroutines%7Bcontainer%3D~%22quer.%2A%22%7D&match%5B%5D=go_goroutines%7Bcontainer%21%3D%22query-scheduler%22%7D&start=1708502400",
+ expectedStruct: &PrometheusLabelValuesQueryRequest{
+ Path: "/api/v1/label/job/values",
+ LabelName: "job",
+ Start: 1708502400 * 1e3,
+ End: 1708588800 * 1e3,
+ LabelMatcherSets: []string{
+ "go_goroutines{container=~\"quer.*\"}",
+ "go_goroutines{container!=\"query-scheduler\"}",
+ },
+ },
+ expectedGetLabelName: "job",
+ expectedGetStartOrDefault: 1708502400 * 1e3,
+ expectedGetEndOrDefault: 1708588800 * 1e3,
+ },
+ } {
+ t.Run(testCase.name, func(t *testing.T) {
+ for _, reqMethod := range []string{http.MethodGet, http.MethodPost} {
+
+ var r *http.Request
+ var err error
+
+ switch reqMethod {
+ case http.MethodGet:
+ r, err = http.NewRequest(reqMethod, testCase.url, nil)
+ require.NoError(t, err)
+ case http.MethodPost:
+ parsedURL, _ := url.Parse(testCase.url)
+ r, err = http.NewRequest(reqMethod, parsedURL.Path, strings.NewReader(parsedURL.RawQuery))
+ require.NoError(t, err)
+ r.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ default:
+ t.Fatalf("unsupported HTTP method %q", reqMethod)
+ }
+
+ ctx := user.InjectOrgID(context.Background(), "1")
+ r = r.WithContext(ctx)
+
+ reqDecoded, err := codec.DecodeLabelsQueryRequest(ctx, r)
+ if err != nil || testCase.expectedErr != nil {
+ require.EqualValues(t, testCase.expectedErr, err)
+ return
+ }
+ require.EqualValues(t, testCase.expectedStruct, reqDecoded)
+ require.EqualValues(t, testCase.expectedGetStartOrDefault, reqDecoded.GetStartOrDefault())
+ require.EqualValues(t, testCase.expectedGetEndOrDefault, reqDecoded.GetEndOrDefault())
+
+ reqEncoded, err := codec.EncodeLabelsQueryRequest(context.Background(), reqDecoded)
+ require.NoError(t, err)
+ require.EqualValues(t, testCase.url, reqEncoded.RequestURI)
+ }
+ })
+ }
+}
+
func TestPrometheusCodec_EncodeRequest_AcceptHeader(t *testing.T) {
for _, queryResultPayloadFormat := range allFormats {
t.Run(queryResultPayloadFormat, func(t *testing.T) {
codec := NewPrometheusCodec(prometheus.NewPedanticRegistry(), queryResultPayloadFormat)
req := PrometheusInstantQueryRequest{}
- encodedRequest, err := codec.EncodeRequest(context.Background(), &req)
+ encodedRequest, err := codec.EncodeMetricsQueryRequest(context.Background(), &req)
require.NoError(t, err)
switch queryResultPayloadFormat {
@@ -134,7 +302,7 @@ func TestPrometheusCodec_EncodeRequest_ReadConsistency(t *testing.T) {
t.Run(consistencyLevel, func(t *testing.T) {
codec := NewPrometheusCodec(prometheus.NewPedanticRegistry(), formatProtobuf)
ctx := api.ContextWithReadConsistency(context.Background(), consistencyLevel)
- encodedRequest, err := codec.EncodeRequest(ctx, &PrometheusInstantQueryRequest{})
+ encodedRequest, err := codec.EncodeMetricsQueryRequest(ctx, &PrometheusInstantQueryRequest{})
require.NoError(t, err)
require.Equal(t, consistencyLevel, encodedRequest.Header.Get(api.ReadConsistencyHeader))
})
@@ -144,7 +312,7 @@ func TestPrometheusCodec_EncodeRequest_ReadConsistency(t *testing.T) {
func TestPrometheusCodec_EncodeResponse_ContentNegotiation(t *testing.T) {
testResponse := &PrometheusResponse{
Status: statusError,
- ErrorType: string(v1.ErrExec),
+ ErrorType: string(v1Client.ErrExec),
Error: "something went wrong",
}
@@ -219,11 +387,11 @@ func TestPrometheusCodec_EncodeResponse_ContentNegotiation(t *testing.T) {
}
type prometheusAPIResponse struct {
- Status string `json:"status"`
- Data interface{} `json:"data,omitempty"`
- ErrorType v1.ErrorType `json:"errorType,omitempty"`
- Error string `json:"error,omitempty"`
- Warnings []string `json:"warnings,omitempty"`
+ Status string `json:"status"`
+ Data interface{} `json:"data,omitempty"`
+ ErrorType v1Client.ErrorType `json:"errorType,omitempty"`
+ Error string `json:"error,omitempty"`
+ Warnings []string `json:"warnings,omitempty"`
}
type prometheusResponseData struct {
@@ -965,14 +1133,14 @@ func TestPrometheusCodec_DecodeEncode(t *testing.T) {
expected.Header = make(http.Header)
}
- // This header is set by EncodeRequest according to the codec's config, so we
+ // This header is set by EncodeMetricsQueryRequest according to the codec's config, so we
// should always expect it to be present on the re-encoded request.
expected.Header.Set("Accept", "application/json")
ctx := context.Background()
- decoded, err := codec.DecodeRequest(ctx, expected)
+ decoded, err := codec.DecodeMetricsQueryRequest(ctx, expected)
require.NoError(t, err)
- encoded, err := codec.EncodeRequest(ctx, decoded)
+ encoded, err := codec.EncodeMetricsQueryRequest(ctx, decoded)
require.NoError(t, err)
assert.Equal(t, expected.URL, encoded.URL)
diff --git a/pkg/frontend/querymiddleware/generic_query_cache.go b/pkg/frontend/querymiddleware/generic_query_cache.go
index a6c0b9c0f2e..5d439ee942d 100644
--- a/pkg/frontend/querymiddleware/generic_query_cache.go
+++ b/pkg/frontend/querymiddleware/generic_query_cache.go
@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"net/http"
- "net/url"
"time"
"github.com/go-kit/log"
@@ -16,7 +15,6 @@ import (
"github.com/grafana/dskit/tenant"
apierror "github.com/grafana/mimir/pkg/api/error"
- "github.com/grafana/mimir/pkg/util"
"github.com/grafana/mimir/pkg/util/spanlogger"
"github.com/grafana/mimir/pkg/util/validation"
)
@@ -35,7 +33,7 @@ type tenantCacheTTL interface {
ttl(userID string) time.Duration
}
-type keyingFunc func(ctx context.Context, path string, values url.Values) (*GenericQueryCacheKey, error)
+type keyingFunc func(r *http.Request) (*GenericQueryCacheKey, error)
// genericQueryCache is a http.RoundTripped wrapping the downstream with a generic HTTP response cache.
type genericQueryCache struct {
@@ -83,15 +81,7 @@ func (c *genericQueryCache) RoundTrip(req *http.Request) (*http.Response, error)
return c.next.RoundTrip(req)
}
- // Decode the request.
- reqValues, err := util.ParseRequestFormWithoutConsumingBody(req)
- if err != nil {
- // This is considered a non-recoverable error, so we return error instead of passing
- // the request to the downstream.
- return nil, apierror.New(apierror.TypeBadData, err.Error())
- }
-
- queryReq, err := c.cacheKey(ctx, req.URL.Path, reqValues)
+ queryReq, err := c.cacheKey(req)
if err != nil {
if !errors.Is(err, ErrUnsupportedRequest) {
// Logging as info because it's not an actionable error here.
diff --git a/pkg/frontend/querymiddleware/generic_query_cache_test.go b/pkg/frontend/querymiddleware/generic_query_cache_test.go
index 51911fc2393..9cf1fd1d71b 100644
--- a/pkg/frontend/querymiddleware/generic_query_cache_test.go
+++ b/pkg/frontend/querymiddleware/generic_query_cache_test.go
@@ -131,7 +131,7 @@ func testGenericQueryCacheRoundTrip(t *testing.T, newRoundTripper newGenericQuer
expectedStoredToCache: false, // Should not store anything to the cache.
},
"should fetch the response from the downstream and overwrite the cached response if corrupted": {
- init: func(t *testing.T, c cache.Cache, _, reqHashedCacheKey string) {
+ init: func(_ *testing.T, c cache.Cache, _, reqHashedCacheKey string) {
c.StoreAsync(map[string][]byte{reqHashedCacheKey: []byte("corrupted")}, time.Minute)
},
cacheTTL: time.Minute,
@@ -226,7 +226,7 @@ func testGenericQueryCacheRoundTrip(t *testing.T, newRoundTripper newGenericQuer
initialStoreCallsCount := cacheBackend.CountStoreCalls()
reg := prometheus.NewPedanticRegistry()
- rt := newRoundTripper(cacheBackend, DefaultCacheKeyGenerator{}, limits, downstream, testutil.NewLogger(t), reg)
+ rt := newRoundTripper(cacheBackend, DefaultCacheKeyGenerator{codec: NewPrometheusCodec(reg, formatJSON)}, limits, downstream, testutil.NewLogger(t), reg)
res, err := rt.RoundTrip(req)
require.NoError(t, err)
diff --git a/pkg/frontend/querymiddleware/instrumentation.go b/pkg/frontend/querymiddleware/instrumentation.go
index b0d8823d137..29164ee6ebf 100644
--- a/pkg/frontend/querymiddleware/instrumentation.go
+++ b/pkg/frontend/querymiddleware/instrumentation.go
@@ -16,7 +16,7 @@ import (
)
// newInstrumentMiddleware can be inserted into the middleware chain to expose timing information.
-func newInstrumentMiddleware(name string, metrics *instrumentMiddlewareMetrics) Middleware {
+func newInstrumentMiddleware(name string, metrics *instrumentMiddlewareMetrics) MetricsQueryMiddleware {
var durationCol instrument.Collector
// Support the case metrics shouldn't be tracked (ie. unit tests).
@@ -26,8 +26,8 @@ func newInstrumentMiddleware(name string, metrics *instrumentMiddlewareMetrics)
durationCol = &noopCollector{}
}
- return MiddlewareFunc(func(next Handler) Handler {
- return HandlerFunc(func(ctx context.Context, req Request) (Response, error) {
+ return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
+ return HandlerFunc(func(ctx context.Context, req MetricsQueryRequest) (Response, error) {
var resp Response
err := instrument.CollectedRequest(ctx, name, durationCol, instrument.ErrorCode, func(ctx context.Context) error {
sp := opentracing.SpanFromContext(ctx)
diff --git a/pkg/frontend/querymiddleware/labels_query_cache.go b/pkg/frontend/querymiddleware/labels_query_cache.go
index b711d113127..1764673d24d 100644
--- a/pkg/frontend/querymiddleware/labels_query_cache.go
+++ b/pkg/frontend/querymiddleware/labels_query_cache.go
@@ -3,7 +3,6 @@
package querymiddleware
import (
- "context"
"fmt"
"net/http"
"net/url"
@@ -29,7 +28,14 @@ const (
stringParamSeparator = rune(0)
)
-func newLabelsQueryCacheRoundTripper(cache cache.Cache, generator CacheKeyGenerator, limits Limits, next http.RoundTripper, logger log.Logger, reg prometheus.Registerer) http.RoundTripper {
+func newLabelsQueryCacheRoundTripper(
+ cache cache.Cache,
+ generator CacheKeyGenerator,
+ limits Limits,
+ next http.RoundTripper,
+ logger log.Logger,
+ reg prometheus.Registerer,
+) http.RoundTripper {
ttl := &labelsQueryTTL{
limits: limits,
}
@@ -45,42 +51,37 @@ func (c *labelsQueryTTL) ttl(userID string) time.Duration {
return c.limits.ResultsCacheTTLForLabelsQuery(userID)
}
-func (DefaultCacheKeyGenerator) LabelValues(_ context.Context, path string, values url.Values) (*GenericQueryCacheKey, error) {
- var (
- cacheKeyPrefix string
- labelName string
- )
-
- // Detect the request type
- switch {
- case strings.HasSuffix(path, labelNamesPathSuffix):
- cacheKeyPrefix = labelNamesQueryCachePrefix
- case labelValuesPathSuffix.MatchString(path):
- cacheKeyPrefix = labelValuesQueryCachePrefix
- labelName = labelValuesPathSuffix.FindStringSubmatch(path)[1]
- default:
- return nil, errors.New("unknown labels API endpoint")
- }
-
- // Both the label names and label values API endpoints support the same exact parameters (with the same defaults),
- // so in this function there's no distinction between the two.
- startTime, err := parseRequestTimeParam(values, "start", v1.MinTime.UnixMilli())
+func (g DefaultCacheKeyGenerator) LabelValues(r *http.Request) (*GenericQueryCacheKey, error) {
+ labelValuesReq, err := g.codec.DecodeLabelsQueryRequest(r.Context(), r)
if err != nil {
return nil, err
}
- endTime, err := parseRequestTimeParam(values, "end", v1.MaxTime.UnixMilli())
- if err != nil {
- return nil, err
+ var cacheKeyPrefix string
+ switch labelValuesReq.(type) {
+ case *PrometheusLabelNamesQueryRequest:
+ cacheKeyPrefix = labelNamesQueryCachePrefix
+ case *PrometheusLabelValuesQueryRequest:
+ cacheKeyPrefix = labelValuesQueryCachePrefix
}
- matcherSets, err := parseRequestMatchersParam(values, "match[]")
+ labelMatcherSets, err := parseRequestMatchersParam(
+ map[string][]string{"match[]": labelValuesReq.GetLabelMatcherSets()},
+ "match[]",
+ )
if err != nil {
return nil, err
}
+ cacheKey := generateLabelsQueryRequestCacheKey(
+ labelValuesReq.GetStartOrDefault(),
+ labelValuesReq.GetEndOrDefault(),
+ labelValuesReq.GetLabelName(),
+ labelMatcherSets,
+ )
+
return &GenericQueryCacheKey{
- CacheKey: generateLabelsQueryRequestCacheKey(startTime, endTime, labelName, matcherSets),
+ CacheKey: cacheKey,
CacheKeyPrefix: cacheKeyPrefix,
}, nil
}
@@ -123,24 +124,6 @@ func generateLabelsQueryRequestCacheKey(startTime, endTime int64, labelName stri
return b.String()
}
-func parseRequestTimeParam(values url.Values, paramName string, defaultValue int64) (int64, error) {
- var value string
- if len(values[paramName]) > 0 {
- value = values[paramName][0]
- }
-
- if value == "" {
- return defaultValue, nil
- }
-
- parsed, err := util.ParseTime(value)
- if err != nil {
- return 0, errors.Wrapf(err, "invalid '%s' parameter", paramName)
- }
-
- return parsed, nil
-}
-
func parseRequestMatchersParam(values url.Values, paramName string) ([][]*labels.Matcher, error) {
matcherSets := make([][]*labels.Matcher, 0, len(values[paramName]))
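The labels cache key generator now derives its key from a codec-decoded LabelsQueryRequest rather than re-parsing query parameters itself. A hedged sketch of that call path from inside the querymiddleware package (the URL and function name are illustrative; reg is any prometheus.Registerer):

func exampleLabelValuesCacheKey(reg prometheus.Registerer) (*GenericQueryCacheKey, error) {
	keyGen := DefaultCacheKeyGenerator{codec: NewPrometheusCodec(reg, formatJSON)}
	r, err := http.NewRequest(http.MethodGet, "/api/v1/label/job/values?start=1708502400", nil)
	if err != nil {
		return nil, err
	}
	// LabelValues decodes the request via DecodeLabelsQueryRequest, then builds the
	// cache key from the decoded time defaults, label name, and matcher sets.
	return keyGen.LabelValues(r)
}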
diff --git a/pkg/frontend/querymiddleware/labels_query_cache_test.go b/pkg/frontend/querymiddleware/labels_query_cache_test.go
index 69d95a40c63..e6b9e2f5d6c 100644
--- a/pkg/frontend/querymiddleware/labels_query_cache_test.go
+++ b/pkg/frontend/querymiddleware/labels_query_cache_test.go
@@ -3,7 +3,6 @@
package querymiddleware
import (
- "context"
"fmt"
"net/http"
"net/url"
@@ -11,6 +10,7 @@ import (
"testing"
"time"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/model/labels"
v1 "github.com/prometheus/prometheus/web/api/v1"
"github.com/stretchr/testify/assert"
@@ -125,6 +125,7 @@ func TestDefaultCacheKeyGenerator_LabelValuesCacheKey(t *testing.T) {
requestTypes := map[string]struct {
requestPath string
+ request *http.Request
expectedCacheKeyPrefix string
expectedCacheKeyWithLabelName bool
}{
@@ -140,12 +141,20 @@ func TestDefaultCacheKeyGenerator_LabelValuesCacheKey(t *testing.T) {
},
}
+ reg := prometheus.NewPedanticRegistry()
+ codec := NewPrometheusCodec(reg, formatJSON)
+
for testName, testData := range tests {
t.Run(testName, func(t *testing.T) {
for requestTypeName, requestTypeData := range requestTypes {
t.Run(requestTypeName, func(t *testing.T) {
- c := DefaultCacheKeyGenerator{}
- actual, err := c.LabelValues(context.Background(), requestTypeData.requestPath, testData.params)
+ c := DefaultCacheKeyGenerator{codec: codec}
+ requestURL, _ := url.Parse(requestTypeData.requestPath)
+ requestURL.RawQuery = testData.params.Encode()
+ request, err := http.NewRequest("GET", requestURL.String(), nil)
+ require.NoError(t, err)
+
+ actual, err := c.LabelValues(request)
require.NoError(t, err)
assert.Equal(t, requestTypeData.expectedCacheKeyPrefix, actual.CacheKeyPrefix)
diff --git a/pkg/frontend/querymiddleware/limits.go b/pkg/frontend/querymiddleware/limits.go
index a3957eea655..0121a4b442a 100644
--- a/pkg/frontend/querymiddleware/limits.go
+++ b/pkg/frontend/querymiddleware/limits.go
@@ -110,13 +110,13 @@ type Limits interface {
type limitsMiddleware struct {
Limits
- next Handler
+ next MetricsQueryHandler
logger log.Logger
}
-// newLimitsMiddleware creates a new Middleware that enforces query limits.
-func newLimitsMiddleware(l Limits, logger log.Logger) Middleware {
- return MiddlewareFunc(func(next Handler) Handler {
+// newLimitsMiddleware creates a new MetricsQueryMiddleware that enforces query limits.
+func newLimitsMiddleware(l Limits, logger log.Logger) MetricsQueryMiddleware {
+ return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
return limitsMiddleware{
next: next,
Limits: l,
@@ -125,7 +125,7 @@ func newLimitsMiddleware(l Limits, logger log.Logger) Middleware {
})
}
-func (l limitsMiddleware) Do(ctx context.Context, r Request) (Response, error) {
+func (l limitsMiddleware) Do(ctx context.Context, r MetricsQueryRequest) (Response, error) {
log, ctx := spanlogger.NewWithLogger(ctx, l.logger, "limits")
defer log.Finish()
@@ -201,15 +201,15 @@ func (l limitsMiddleware) Do(ctx context.Context, r Request) (Response, error) {
}
type limitedParallelismRoundTripper struct {
- downstream Handler
+ downstream MetricsQueryHandler
limits Limits
codec Codec
- middleware Middleware
+ middleware MetricsQueryMiddleware
}
// newLimitedParallelismRoundTripper creates a new roundtripper that enforces MaxQueryParallelism to the `next` roundtripper across `middlewares`.
-func newLimitedParallelismRoundTripper(next http.RoundTripper, codec Codec, limits Limits, middlewares ...Middleware) http.RoundTripper {
+func newLimitedParallelismRoundTripper(next http.RoundTripper, codec Codec, limits Limits, middlewares ...MetricsQueryMiddleware) http.RoundTripper {
return limitedParallelismRoundTripper{
downstream: roundTripperHandler{
next: next,
@@ -217,7 +217,7 @@ func newLimitedParallelismRoundTripper(next http.RoundTripper, codec Codec, limi
},
codec: codec,
limits: limits,
- middleware: MergeMiddlewares(middlewares...),
+ middleware: MergeMetricsQueryMiddlewares(middlewares...),
}
}
@@ -225,7 +225,7 @@ func (rt limitedParallelismRoundTripper) RoundTrip(r *http.Request) (*http.Respo
ctx, cancel := context.WithCancelCause(r.Context())
defer cancel(errExecutingParallelQueriesFinished)
- request, err := rt.codec.DecodeRequest(ctx, r)
+ request, err := rt.codec.DecodeMetricsQueryRequest(ctx, r)
if err != nil {
return nil, err
}
@@ -246,7 +246,7 @@ func (rt limitedParallelismRoundTripper) RoundTrip(r *http.Request) (*http.Respo
// parallel from upstream handlers and ensure that no more than MaxQueryParallelism
// sub-requests run in parallel.
response, err := rt.middleware.Wrap(
- HandlerFunc(func(ctx context.Context, r Request) (Response, error) {
+ HandlerFunc(func(ctx context.Context, r MetricsQueryRequest) (Response, error) {
if err := sem.Acquire(ctx, 1); err != nil {
return nil, fmt.Errorf("could not acquire work: %w", err)
}
@@ -261,17 +261,17 @@ func (rt limitedParallelismRoundTripper) RoundTrip(r *http.Request) (*http.Respo
return rt.codec.EncodeResponse(ctx, r, response)
}
-// roundTripperHandler is an adapter that implements the Handler interface using a http.RoundTripper to perform
+// roundTripperHandler is an adapter that implements the MetricsQueryHandler interface using a http.RoundTripper to perform
// the requests and a Codec to translate between http Request/Response model and this package's Request/Response model.
-// It basically encodes a Request from Handler.Do and decodes response from next roundtripper.
+// It basically encodes a MetricsQueryRequest from MetricsQueryHandler.Do and decodes response from next roundtripper.
type roundTripperHandler struct {
logger log.Logger
next http.RoundTripper
codec Codec
}
-func (rth roundTripperHandler) Do(ctx context.Context, r Request) (Response, error) {
- request, err := rth.codec.EncodeRequest(ctx, r)
+func (rth roundTripperHandler) Do(ctx context.Context, r MetricsQueryRequest) (Response, error) {
+ request, err := rth.codec.EncodeMetricsQueryRequest(ctx, r)
if err != nil {
return nil, err
}
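Middleware chains are now assembled with MergeMetricsQueryMiddlewares and wrapped around a terminal MetricsQueryHandler, as limitedParallelismRoundTripper does above with its downstream handler. A sketch of composing such a chain inside the querymiddleware package (the helper name is hypothetical; passing nil metrics to newInstrumentMiddleware falls back to its no-op collector):

func exampleBuildChain(limits Limits, logger log.Logger, final MetricsQueryHandler) MetricsQueryHandler {
	chain := MergeMetricsQueryMiddlewares(
		newInstrumentMiddleware("limits", nil),
		newLimitsMiddleware(limits, logger),
	)
	// Wrap applies the merged middlewares around the terminal handler.
	return chain.Wrap(final)
}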
diff --git a/pkg/frontend/querymiddleware/limits_test.go b/pkg/frontend/querymiddleware/limits_test.go
index 3420d4c5363..9fd66711b0c 100644
--- a/pkg/frontend/querymiddleware/limits_test.go
+++ b/pkg/frontend/querymiddleware/limits_test.go
@@ -122,8 +122,8 @@ func TestLimitsMiddleware_MaxQueryLookback(t *testing.T) {
delta := float64(5000)
require.Len(t, inner.Calls, 1)
- assert.InDelta(t, util.TimeToMillis(testData.expectedStartTime), inner.Calls[0].Arguments.Get(1).(Request).GetStart(), delta)
- assert.InDelta(t, util.TimeToMillis(testData.expectedEndTime), inner.Calls[0].Arguments.Get(1).(Request).GetEnd(), delta)
+ assert.InDelta(t, util.TimeToMillis(testData.expectedStartTime), inner.Calls[0].Arguments.Get(1).(MetricsQueryRequest).GetStart(), delta)
+ assert.InDelta(t, util.TimeToMillis(testData.expectedEndTime), inner.Calls[0].Arguments.Get(1).(MetricsQueryRequest).GetEnd(), delta)
}
})
}
@@ -283,8 +283,8 @@ func TestLimitsMiddleware_MaxQueryLength(t *testing.T) {
// The time range of the request passed to the inner handler should have not been manipulated.
require.Len(t, inner.Calls, 1)
- assert.Equal(t, util.TimeToMillis(testData.reqStartTime), inner.Calls[0].Arguments.Get(1).(Request).GetStart())
- assert.Equal(t, util.TimeToMillis(testData.reqEndTime), inner.Calls[0].Arguments.Get(1).(Request).GetEnd())
+ assert.Equal(t, util.TimeToMillis(testData.reqStartTime), inner.Calls[0].Arguments.Get(1).(MetricsQueryRequest).GetStart())
+ assert.Equal(t, util.TimeToMillis(testData.reqEndTime), inner.Calls[0].Arguments.Get(1).(MetricsQueryRequest).GetEnd())
}
})
}
@@ -345,7 +345,7 @@ func TestLimitsMiddleware_CreationGracePeriod(t *testing.T) {
delta := float64(5000)
require.Len(t, inner.Calls, 1)
- assert.InDelta(t, util.TimeToMillis(testData.expectedEndTime), inner.Calls[0].Arguments.Get(1).(Request).GetEnd(), delta)
+ assert.InDelta(t, util.TimeToMillis(testData.expectedEndTime), inner.Calls[0].Arguments.Get(1).(MetricsQueryRequest).GetEnd(), delta)
})
}
}
@@ -570,7 +570,7 @@ type mockHandler struct {
mock.Mock
}
-func (m *mockHandler) Do(ctx context.Context, req Request) (Response, error) {
+func (m *mockHandler) Do(ctx context.Context, req MetricsQueryRequest) (Response, error) {
args := m.Called(ctx, req)
return args.Get(0).(Response), args.Error(1)
}
@@ -596,7 +596,7 @@ func TestLimitedRoundTripper_MaxQueryParallelism(t *testing.T) {
)
codec := newTestPrometheusCodec()
- r, err := codec.EncodeRequest(ctx, &PrometheusRangeQueryRequest{
+ r, err := codec.EncodeMetricsQueryRequest(ctx, &PrometheusRangeQueryRequest{
Path: "/api/v1/query_range",
Start: time.Now().Add(time.Hour).Unix(),
End: util.TimeToMillis(time.Now()),
@@ -606,8 +606,8 @@ func TestLimitedRoundTripper_MaxQueryParallelism(t *testing.T) {
require.Nil(t, err)
_, err = newLimitedParallelismRoundTripper(downstream, codec, mockLimits{maxQueryParallelism: maxQueryParallelism},
- MiddlewareFunc(func(next Handler) Handler {
- return HandlerFunc(func(c context.Context, _ Request) (Response, error) {
+ MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
+ return HandlerFunc(func(c context.Context, _ MetricsQueryRequest) (Response, error) {
var wg sync.WaitGroup
for i := 0; i < maxQueryParallelism+20; i++ {
wg.Add(1)
@@ -640,7 +640,7 @@ func TestLimitedRoundTripper_MaxQueryParallelismLateScheduling(t *testing.T) {
)
codec := newTestPrometheusCodec()
- r, err := codec.EncodeRequest(ctx, &PrometheusRangeQueryRequest{
+ r, err := codec.EncodeMetricsQueryRequest(ctx, &PrometheusRangeQueryRequest{
Path: "/api/v1/query_range",
Start: time.Now().Add(time.Hour).Unix(),
End: util.TimeToMillis(time.Now()),
@@ -650,8 +650,8 @@ func TestLimitedRoundTripper_MaxQueryParallelismLateScheduling(t *testing.T) {
require.Nil(t, err)
_, err = newLimitedParallelismRoundTripper(downstream, codec, mockLimits{maxQueryParallelism: maxQueryParallelism},
- MiddlewareFunc(func(next Handler) Handler {
- return HandlerFunc(func(c context.Context, _ Request) (Response, error) {
+ MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
+ return HandlerFunc(func(c context.Context, _ MetricsQueryRequest) (Response, error) {
// fire up work and we don't wait.
for i := 0; i < 10; i++ {
go func() {
@@ -681,7 +681,7 @@ func TestLimitedRoundTripper_OriginalRequestContextCancellation(t *testing.T) {
)
codec := newTestPrometheusCodec()
- r, err := codec.EncodeRequest(reqCtx, &PrometheusRangeQueryRequest{
+ r, err := codec.EncodeMetricsQueryRequest(reqCtx, &PrometheusRangeQueryRequest{
Path: "/api/v1/query_range",
Start: time.Now().Add(time.Hour).Unix(),
End: util.TimeToMillis(time.Now()),
@@ -691,8 +691,8 @@ func TestLimitedRoundTripper_OriginalRequestContextCancellation(t *testing.T) {
require.Nil(t, err)
_, err = newLimitedParallelismRoundTripper(downstream, codec, mockLimits{maxQueryParallelism: maxQueryParallelism},
- MiddlewareFunc(func(next Handler) Handler {
- return HandlerFunc(func(c context.Context, _ Request) (Response, error) {
+ MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
+ return HandlerFunc(func(c context.Context, _ MetricsQueryRequest) (Response, error) {
var wg sync.WaitGroup
// Fire up some work. Each sub-request will either be blocked in the sleep or in the queue
@@ -738,7 +738,7 @@ func BenchmarkLimitedParallelismRoundTripper(b *testing.B) {
})
codec := newTestPrometheusCodec()
- r, err := codec.EncodeRequest(ctx, &PrometheusRangeQueryRequest{
+ r, err := codec.EncodeMetricsQueryRequest(ctx, &PrometheusRangeQueryRequest{
Path: "/api/v1/query_range",
Start: time.Now().Add(time.Hour).Unix(),
End: util.TimeToMillis(time.Now()),
@@ -750,8 +750,8 @@ func BenchmarkLimitedParallelismRoundTripper(b *testing.B) {
for _, concurrentRequestCount := range []int{1, 10, 100} {
for _, subRequestCount := range []int{1, 2, 5, 10, 20, 50, 100} {
tripper := newLimitedParallelismRoundTripper(downstream, codec, mockLimits{maxQueryParallelism: maxParallelism},
- MiddlewareFunc(func(next Handler) Handler {
- return HandlerFunc(func(c context.Context, _ Request) (Response, error) {
+ MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
+ return HandlerFunc(func(c context.Context, _ MetricsQueryRequest) (Response, error) {
wg := sync.WaitGroup{}
for i := 0; i < subRequestCount; i++ {
wg.Add(1)
diff --git a/pkg/frontend/querymiddleware/model.pb.go b/pkg/frontend/querymiddleware/model.pb.go
index 352d5213135..1ffe7b223f1 100644
--- a/pkg/frontend/querymiddleware/model.pb.go
+++ b/pkg/frontend/querymiddleware/model.pb.go
@@ -19,6 +19,7 @@ import (
math "math"
math_bits "math/bits"
reflect "reflect"
+ strconv "strconv"
strings "strings"
time "time"
)
@@ -35,6 +36,33 @@ var _ = time.Kitchen
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+type MatchType int32
+
+const (
+ EQUAL MatchType = 0
+ NOT_EQUAL MatchType = 1
+ REGEX_MATCH MatchType = 2
+ REGEX_NO_MATCH MatchType = 3
+)
+
+var MatchType_name = map[int32]string{
+ 0: "EQUAL",
+ 1: "NOT_EQUAL",
+ 2: "REGEX_MATCH",
+ 3: "REGEX_NO_MATCH",
+}
+
+var MatchType_value = map[string]int32{
+ "EQUAL": 0,
+ "NOT_EQUAL": 1,
+ "REGEX_MATCH": 2,
+ "REGEX_NO_MATCH": 3,
+}
+
+func (MatchType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_4c16552f9fdb66d8, []int{0}
+}
+
type PrometheusRangeQueryRequest struct {
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
Start int64 `protobuf:"varint,2,opt,name=start,proto3" json:"start,omitempty"`
@@ -43,7 +71,7 @@ type PrometheusRangeQueryRequest struct {
Timeout time.Duration `protobuf:"bytes,5,opt,name=timeout,proto3,stdduration" json:"timeout"`
Query string `protobuf:"bytes,6,opt,name=query,proto3" json:"query,omitempty"`
Options Options `protobuf:"bytes,7,opt,name=options,proto3" json:"options"`
- // ID of the request used by splitAndCacheMiddleware to correlate downstream requests and responses.
+ // ID of the request used to correlate downstream requests and responses.
Id int64 `protobuf:"varint,8,opt,name=id,proto3" json:"id,omitempty"`
// Hints that could be optionally attached to the request to pass down the stack.
// These hints can be used to optimize the query execution.
@@ -150,7 +178,7 @@ type PrometheusInstantQueryRequest struct {
Time int64 `protobuf:"varint,2,opt,name=time,proto3" json:"time,omitempty"`
Query string `protobuf:"bytes,3,opt,name=query,proto3" json:"query,omitempty"`
Options Options `protobuf:"bytes,4,opt,name=options,proto3" json:"options"`
- // ID of the request used by splitAndCacheMiddleware to correlate downstream requests and responses.
+ // ID of the request used to correlate downstream requests and responses.
Id int64 `protobuf:"varint,5,opt,name=id,proto3" json:"id,omitempty"`
// Hints that could be optionally attached to the request to pass down the stack.
// These hints can be used to optimize the query execution.
@@ -231,6 +259,276 @@ func (m *PrometheusInstantQueryRequest) GetHints() *Hints {
return nil
}
+type LabelMatchers struct {
+ MatcherSet []*LabelMatcher `protobuf:"bytes,1,rep,name=matcherSet,proto3" json:"matcherSet,omitempty"`
+}
+
+func (m *LabelMatchers) Reset() { *m = LabelMatchers{} }
+func (*LabelMatchers) ProtoMessage() {}
+func (*LabelMatchers) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4c16552f9fdb66d8, []int{2}
+}
+func (m *LabelMatchers) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LabelMatchers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LabelMatchers.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LabelMatchers) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelMatchers.Merge(m, src)
+}
+func (m *LabelMatchers) XXX_Size() int {
+ return m.Size()
+}
+func (m *LabelMatchers) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelMatchers.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelMatchers proto.InternalMessageInfo
+
+func (m *LabelMatchers) GetMatcherSet() []*LabelMatcher {
+ if m != nil {
+ return m.MatcherSet
+ }
+ return nil
+}
+
+type LabelMatcher struct {
+ Type MatchType `protobuf:"varint,1,opt,name=type,proto3,enum=queryrange.MatchType" json:"type,omitempty"`
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
+func (*LabelMatcher) ProtoMessage() {}
+func (*LabelMatcher) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4c16552f9fdb66d8, []int{3}
+}
+func (m *LabelMatcher) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LabelMatcher) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelMatcher.Merge(m, src)
+}
+func (m *LabelMatcher) XXX_Size() int {
+ return m.Size()
+}
+func (m *LabelMatcher) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelMatcher.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelMatcher proto.InternalMessageInfo
+
+func (m *LabelMatcher) GetType() MatchType {
+ if m != nil {
+ return m.Type
+ }
+ return EQUAL
+}
+
+func (m *LabelMatcher) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *LabelMatcher) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+type PrometheusLabelNamesQueryRequest struct {
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ Start int64 `protobuf:"varint,2,opt,name=start,proto3" json:"start,omitempty"`
+ End int64 `protobuf:"varint,3,opt,name=end,proto3" json:"end,omitempty"`
+ // labelMatcherSets is a repeated field here in order to enable the representation
+ // of labels queries which have not yet been split; the prometheus querier code
+ // will eventually split requests like `?match[]=up&match[]=process_start_time_seconds{job="prometheus"}`
+ // into separate queries, one for each matcher set
+ LabelMatcherSets []string `protobuf:"bytes,4,rep,name=labelMatcherSets,proto3" json:"labelMatcherSets,omitempty"`
+ // ID of the request used to correlate downstream requests and responses.
+ Id int64 `protobuf:"varint,5,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (m *PrometheusLabelNamesQueryRequest) Reset() { *m = PrometheusLabelNamesQueryRequest{} }
+func (*PrometheusLabelNamesQueryRequest) ProtoMessage() {}
+func (*PrometheusLabelNamesQueryRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4c16552f9fdb66d8, []int{4}
+}
+func (m *PrometheusLabelNamesQueryRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PrometheusLabelNamesQueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PrometheusLabelNamesQueryRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PrometheusLabelNamesQueryRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PrometheusLabelNamesQueryRequest.Merge(m, src)
+}
+func (m *PrometheusLabelNamesQueryRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *PrometheusLabelNamesQueryRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_PrometheusLabelNamesQueryRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PrometheusLabelNamesQueryRequest proto.InternalMessageInfo
+
+func (m *PrometheusLabelNamesQueryRequest) GetPath() string {
+ if m != nil {
+ return m.Path
+ }
+ return ""
+}
+
+func (m *PrometheusLabelNamesQueryRequest) GetStart() int64 {
+ if m != nil {
+ return m.Start
+ }
+ return 0
+}
+
+func (m *PrometheusLabelNamesQueryRequest) GetEnd() int64 {
+ if m != nil {
+ return m.End
+ }
+ return 0
+}
+
+func (m *PrometheusLabelNamesQueryRequest) GetLabelMatcherSets() []string {
+ if m != nil {
+ return m.LabelMatcherSets
+ }
+ return nil
+}
+
+func (m *PrometheusLabelNamesQueryRequest) GetId() int64 {
+ if m != nil {
+ return m.Id
+ }
+ return 0
+}
+
+type PrometheusLabelValuesQueryRequest struct {
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ LabelName string `protobuf:"bytes,2,opt,name=labelName,proto3" json:"labelName,omitempty"`
+ Start int64 `protobuf:"varint,3,opt,name=start,proto3" json:"start,omitempty"`
+ End int64 `protobuf:"varint,4,opt,name=end,proto3" json:"end,omitempty"`
+ // labelMatcherSets is a repeated field here in order to enable the representation
+ // of labels queries which have not yet been split; the prometheus querier code
+ // will eventually split requests like `?match[]=up&match[]=process_start_time_seconds{job="prometheus"}`
+ // into separate queries, one for each matcher set
+ LabelMatcherSets []string `protobuf:"bytes,5,rep,name=labelMatcherSets,proto3" json:"labelMatcherSets,omitempty"`
+ // ID of the request used to correlate downstream requests and responses.
+ Id int64 `protobuf:"varint,6,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (m *PrometheusLabelValuesQueryRequest) Reset() { *m = PrometheusLabelValuesQueryRequest{} }
+func (*PrometheusLabelValuesQueryRequest) ProtoMessage() {}
+func (*PrometheusLabelValuesQueryRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4c16552f9fdb66d8, []int{5}
+}
+func (m *PrometheusLabelValuesQueryRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PrometheusLabelValuesQueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PrometheusLabelValuesQueryRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PrometheusLabelValuesQueryRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PrometheusLabelValuesQueryRequest.Merge(m, src)
+}
+func (m *PrometheusLabelValuesQueryRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *PrometheusLabelValuesQueryRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_PrometheusLabelValuesQueryRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PrometheusLabelValuesQueryRequest proto.InternalMessageInfo
+
+func (m *PrometheusLabelValuesQueryRequest) GetPath() string {
+ if m != nil {
+ return m.Path
+ }
+ return ""
+}
+
+func (m *PrometheusLabelValuesQueryRequest) GetLabelName() string {
+ if m != nil {
+ return m.LabelName
+ }
+ return ""
+}
+
+func (m *PrometheusLabelValuesQueryRequest) GetStart() int64 {
+ if m != nil {
+ return m.Start
+ }
+ return 0
+}
+
+func (m *PrometheusLabelValuesQueryRequest) GetEnd() int64 {
+ if m != nil {
+ return m.End
+ }
+ return 0
+}
+
+func (m *PrometheusLabelValuesQueryRequest) GetLabelMatcherSets() []string {
+ if m != nil {
+ return m.LabelMatcherSets
+ }
+ return nil
+}
+
+func (m *PrometheusLabelValuesQueryRequest) GetId() int64 {
+ if m != nil {
+ return m.Id
+ }
+ return 0
+}
+
type PrometheusResponseHeader struct {
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"-"`
Values []string `protobuf:"bytes,2,rep,name=Values,proto3" json:"-"`
@@ -239,7 +537,7 @@ type PrometheusResponseHeader struct {
func (m *PrometheusResponseHeader) Reset() { *m = PrometheusResponseHeader{} }
func (*PrometheusResponseHeader) ProtoMessage() {}
func (*PrometheusResponseHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_4c16552f9fdb66d8, []int{2}
+ return fileDescriptor_4c16552f9fdb66d8, []int{6}
}
func (m *PrometheusResponseHeader) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -294,7 +592,7 @@ type PrometheusResponse struct {
func (m *PrometheusResponse) Reset() { *m = PrometheusResponse{} }
func (*PrometheusResponse) ProtoMessage() {}
func (*PrometheusResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4c16552f9fdb66d8, []int{3}
+ return fileDescriptor_4c16552f9fdb66d8, []int{7}
}
func (m *PrometheusResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -373,7 +671,7 @@ type PrometheusData struct {
func (m *PrometheusData) Reset() { *m = PrometheusData{} }
func (*PrometheusData) ProtoMessage() {}
func (*PrometheusData) Descriptor() ([]byte, []int) {
- return fileDescriptor_4c16552f9fdb66d8, []int{4}
+ return fileDescriptor_4c16552f9fdb66d8, []int{8}
}
func (m *PrometheusData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -425,7 +723,7 @@ type SampleStream struct {
func (m *SampleStream) Reset() { *m = SampleStream{} }
func (*SampleStream) ProtoMessage() {}
func (*SampleStream) Descriptor() ([]byte, []int) {
- return fileDescriptor_4c16552f9fdb66d8, []int{5}
+ return fileDescriptor_4c16552f9fdb66d8, []int{9}
}
func (m *SampleStream) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -477,7 +775,7 @@ type CachedResponse struct {
func (m *CachedResponse) Reset() { *m = CachedResponse{} }
func (*CachedResponse) ProtoMessage() {}
func (*CachedResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4c16552f9fdb66d8, []int{6}
+ return fileDescriptor_4c16552f9fdb66d8, []int{10}
}
func (m *CachedResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -534,7 +832,7 @@ type Extent struct {
func (m *Extent) Reset() { *m = Extent{} }
func (*Extent) ProtoMessage() {}
func (*Extent) Descriptor() ([]byte, []int) {
- return fileDescriptor_4c16552f9fdb66d8, []int{7}
+ return fileDescriptor_4c16552f9fdb66d8, []int{11}
}
func (m *Extent) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -610,7 +908,7 @@ type Options struct {
func (m *Options) Reset() { *m = Options{} }
func (*Options) ProtoMessage() {}
func (*Options) Descriptor() ([]byte, []int) {
- return fileDescriptor_4c16552f9fdb66d8, []int{8}
+ return fileDescriptor_4c16552f9fdb66d8, []int{12}
}
func (m *Options) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -689,7 +987,7 @@ type Hints struct {
func (m *Hints) Reset() { *m = Hints{} }
func (*Hints) ProtoMessage() {}
func (*Hints) Descriptor() ([]byte, []int) {
- return fileDescriptor_4c16552f9fdb66d8, []int{9}
+ return fileDescriptor_4c16552f9fdb66d8, []int{13}
}
func (m *Hints) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -766,7 +1064,7 @@ type QueryStatistics struct {
func (m *QueryStatistics) Reset() { *m = QueryStatistics{} }
func (*QueryStatistics) ProtoMessage() {}
func (*QueryStatistics) Descriptor() ([]byte, []int) {
- return fileDescriptor_4c16552f9fdb66d8, []int{10}
+ return fileDescriptor_4c16552f9fdb66d8, []int{14}
}
func (m *QueryStatistics) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -818,7 +1116,7 @@ type CachedHTTPResponse struct {
func (m *CachedHTTPResponse) Reset() { *m = CachedHTTPResponse{} }
func (*CachedHTTPResponse) ProtoMessage() {}
func (*CachedHTTPResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4c16552f9fdb66d8, []int{11}
+ return fileDescriptor_4c16552f9fdb66d8, []int{15}
}
func (m *CachedHTTPResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -884,7 +1182,7 @@ type CachedHTTPHeader struct {
func (m *CachedHTTPHeader) Reset() { *m = CachedHTTPHeader{} }
func (*CachedHTTPHeader) ProtoMessage() {}
func (*CachedHTTPHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_4c16552f9fdb66d8, []int{12}
+ return fileDescriptor_4c16552f9fdb66d8, []int{16}
}
func (m *CachedHTTPHeader) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -928,8 +1226,13 @@ func (m *CachedHTTPHeader) GetValue() string {
}
func init() {
+ proto.RegisterEnum("queryrange.MatchType", MatchType_name, MatchType_value)
proto.RegisterType((*PrometheusRangeQueryRequest)(nil), "queryrange.PrometheusRangeQueryRequest")
proto.RegisterType((*PrometheusInstantQueryRequest)(nil), "queryrange.PrometheusInstantQueryRequest")
+ proto.RegisterType((*LabelMatchers)(nil), "queryrange.LabelMatchers")
+ proto.RegisterType((*LabelMatcher)(nil), "queryrange.LabelMatcher")
+ proto.RegisterType((*PrometheusLabelNamesQueryRequest)(nil), "queryrange.PrometheusLabelNamesQueryRequest")
+ proto.RegisterType((*PrometheusLabelValuesQueryRequest)(nil), "queryrange.PrometheusLabelValuesQueryRequest")
proto.RegisterType((*PrometheusResponseHeader)(nil), "queryrange.PrometheusResponseHeader")
proto.RegisterType((*PrometheusResponse)(nil), "queryrange.PrometheusResponse")
proto.RegisterType((*PrometheusData)(nil), "queryrange.PrometheusData")
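The hunk above registers the new MatchType enum and the four new message types with the gogo/protobuf type registry. As a minimal sketch of how these generated types fit together (a hypothetical helper, not part of the generated file, assumed to sit in the same package so the types and the EQUAL value are in scope), a LabelMatchers value can be round-tripped through the generated Marshal, Unmarshal and Equal methods:

func exampleLabelMatchersRoundTrip() error {
	// Illustrative only: build a single equality matcher, job="prometheus".
	in := &LabelMatchers{
		MatcherSet: []*LabelMatcher{
			{Type: EQUAL, Name: "job", Value: "prometheus"},
		},
	}

	// Marshal produces the proto3 wire encoding; MarshalToSizedBuffer writes it
	// back to front into a buffer sized by Size().
	buf, err := in.Marshal()
	if err != nil {
		return err
	}

	// Unmarshal decodes the same bytes into a fresh message.
	out := &LabelMatchers{}
	if err := out.Unmarshal(buf); err != nil {
		return err
	}

	// The generated Equal does a field-by-field comparison.
	if !in.Equal(out) {
		return fmt.Errorf("round-trip mismatch: %v vs %v", in, out)
	}
	return nil
}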
@@ -946,86 +1249,105 @@ func init() {
func init() { proto.RegisterFile("model.proto", fileDescriptor_4c16552f9fdb66d8) }
var fileDescriptor_4c16552f9fdb66d8 = []byte{
- // 1223 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xcd, 0x72, 0x1b, 0x45,
- 0x10, 0xd6, 0xea, 0x5f, 0x2d, 0x63, 0x9b, 0xb1, 0x81, 0x75, 0x42, 0x76, 0x55, 0x5b, 0x39, 0x18,
- 0x2a, 0x91, 0xc1, 0x01, 0x0e, 0x14, 0x50, 0x64, 0x1d, 0x53, 0x0e, 0xbf, 0x66, 0xec, 0x82, 0x2a,
- 0x2e, 0xae, 0x91, 0x76, 0x22, 0x2d, 0xd9, 0xbf, 0xcc, 0x8e, 0x92, 0xe8, 0x46, 0xf1, 0x00, 0x14,
- 0x47, 0x4e, 0x9c, 0x79, 0x02, 0x9e, 0x21, 0xc7, 0x70, 0x0b, 0x39, 0x08, 0xa2, 0x14, 0x55, 0x94,
- 0x4e, 0x79, 0x04, 0x6a, 0x7a, 0x76, 0xa5, 0xb5, 0x2d, 0x8a, 0x70, 0x91, 0x7a, 0xba, 0xbf, 0xee,
- 0xf9, 0xba, 0x67, 0xf6, 0x1b, 0x68, 0x87, 0xb1, 0xc7, 0x83, 0x6e, 0x22, 0x62, 0x19, 0x13, 0xb8,
- 0x33, 0xe2, 0x62, 0x2c, 0x58, 0x34, 0xe0, 0x17, 0xae, 0x0e, 0x7c, 0x39, 0x1c, 0xf5, 0xba, 0xfd,
- 0x38, 0xdc, 0x19, 0xc4, 0x83, 0x78, 0x07, 0x21, 0xbd, 0xd1, 0x2d, 0x5c, 0xe1, 0x02, 0x2d, 0x9d,
- 0x7a, 0xc1, 0x1a, 0xc4, 0xf1, 0x20, 0xe0, 0x0b, 0x94, 0x37, 0x12, 0x4c, 0xfa, 0x71, 0x94, 0xc5,
- 0xdf, 0x28, 0x96, 0x13, 0xec, 0x16, 0x8b, 0xd8, 0x4e, 0xe8, 0x87, 0xbe, 0xd8, 0x49, 0x6e, 0x0f,
- 0xb4, 0x95, 0xf4, 0xf4, 0x7f, 0x96, 0xb1, 0x75, 0xb6, 0x22, 0x8b, 0xc6, 0x3a, 0xe4, 0xfc, 0x5a,
- 0x86, 0x8b, 0x87, 0x22, 0x0e, 0xb9, 0x1c, 0xf2, 0x51, 0x4a, 0x15, 0xdf, 0x2f, 0x15, 0x73, 0xca,
- 0xef, 0x8c, 0x78, 0x2a, 0x09, 0x81, 0x6a, 0xc2, 0xe4, 0xd0, 0x34, 0x3a, 0xc6, 0x76, 0x8b, 0xa2,
- 0x4d, 0x36, 0xa1, 0x96, 0x4a, 0x26, 0xa4, 0x59, 0xee, 0x18, 0xdb, 0x15, 0xaa, 0x17, 0x64, 0x1d,
- 0x2a, 0x3c, 0xf2, 0xcc, 0x0a, 0xfa, 0x94, 0xa9, 0x72, 0x53, 0xc9, 0x13, 0xb3, 0x8a, 0x2e, 0xb4,
- 0xc9, 0xfb, 0xd0, 0x90, 0x7e, 0xc8, 0xe3, 0x91, 0x34, 0x6b, 0x1d, 0x63, 0xbb, 0xbd, 0xbb, 0xd5,
- 0xd5, 0xe4, 0xba, 0x39, 0xb9, 0xee, 0x8d, 0xac, 0x5d, 0xb7, 0xf9, 0x60, 0x62, 0x97, 0x7e, 0xfa,
- 0xc3, 0x36, 0x68, 0x9e, 0xa3, 0xb6, 0xc6, 0xc1, 0x9a, 0x75, 0xe4, 0xa3, 0x17, 0xe4, 0x1a, 0x34,
- 0xe2, 0x44, 0xa5, 0xa4, 0x66, 0x03, 0x8b, 0x6e, 0x74, 0x17, 0xe3, 0xef, 0x7e, 0xa1, 0x43, 0x6e,
- 0x55, 0x95, 0xa3, 0x39, 0x92, 0xac, 0x42, 0xd9, 0xf7, 0xcc, 0x26, 0x72, 0x2b, 0xfb, 0x1e, 0xb9,
- 0x0a, 0xb5, 0xa1, 0x1f, 0xc9, 0xd4, 0x6c, 0x61, 0x89, 0x17, 0x8b, 0x25, 0x0e, 0x54, 0x00, 0x0b,
- 0x18, 0x54, 0xa3, 0x9c, 0xdf, 0x0c, 0xb8, 0xb4, 0x18, 0xdc, 0xcd, 0x28, 0x95, 0x2c, 0x92, 0xff,
- 0x39, 0x3a, 0x02, 0x55, 0xd5, 0x4a, 0x36, 0x39, 0xb4, 0x17, 0x3d, 0x55, 0xfe, 0xa5, 0xa7, 0xea,
- 0xff, 0xec, 0xa9, 0x76, 0xbe, 0xa7, 0xfa, 0x73, 0xf5, 0x74, 0x0c, 0x66, 0xe1, 0x2e, 0xf0, 0x34,
- 0x89, 0xa3, 0x94, 0x1f, 0x70, 0xe6, 0x71, 0x41, 0xb6, 0xa0, 0xfa, 0x39, 0x0b, 0xb9, 0xee, 0xc6,
- 0xad, 0xcd, 0x26, 0xb6, 0x71, 0x95, 0xa2, 0x8b, 0x5c, 0x82, 0xfa, 0x57, 0x2c, 0x18, 0xf1, 0xd4,
- 0x2c, 0x77, 0x2a, 0x8b, 0x60, 0xe6, 0x74, 0x7e, 0x2f, 0x03, 0x39, 0x5f, 0x96, 0x38, 0x50, 0x3f,
- 0x92, 0x4c, 0x8e, 0xd2, 0xac, 0x24, 0xcc, 0x26, 0x76, 0x3d, 0x45, 0x0f, 0xcd, 0x22, 0xc4, 0x85,
- 0xea, 0x0d, 0x26, 0x19, 0x8e, 0xab, 0xbd, 0x7b, 0xa1, 0x48, 0x7f, 0x51, 0x51, 0x21, 0x5c, 0x32,
- 0x9b, 0xd8, 0xab, 0x1e, 0x93, 0xec, 0x4a, 0x1c, 0xfa, 0x92, 0x87, 0x89, 0x1c, 0x53, 0xcc, 0x25,
- 0x6f, 0x43, 0x6b, 0x5f, 0x88, 0x58, 0x1c, 0x8f, 0x13, 0xae, 0x47, 0xec, 0xbe, 0x32, 0x9b, 0xd8,
- 0x1b, 0x3c, 0x77, 0x16, 0x32, 0x16, 0x48, 0xf2, 0x1a, 0xd4, 0x70, 0x81, 0xd3, 0x6f, 0xb9, 0x1b,
- 0xb3, 0x89, 0xbd, 0x86, 0x29, 0x05, 0xb8, 0x46, 0x90, 0x7d, 0x68, 0xe8, 0x21, 0xa5, 0x66, 0xad,
- 0x53, 0xd9, 0x6e, 0xef, 0x5e, 0x5e, 0x4e, 0xf4, 0xf4, 0x44, 0xf3, 0x31, 0xe5, 0xb9, 0x64, 0x17,
- 0x9a, 0x5f, 0x33, 0x11, 0xf9, 0xd1, 0x40, 0x9d, 0x97, 0x1a, 0xe4, 0xcb, 0xb3, 0x89, 0x4d, 0xee,
- 0x65, 0xbe, 0xc2, 0xbe, 0x73, 0x9c, 0xf3, 0xbd, 0x01, 0xab, 0xa7, 0x27, 0x41, 0xba, 0x00, 0x94,
- 0xa7, 0xa3, 0x40, 0x62, 0xc3, 0x7a, 0xb6, 0xab, 0xb3, 0x89, 0x0d, 0x62, 0xee, 0xa5, 0x05, 0x04,
- 0xf9, 0x10, 0xea, 0x7a, 0x85, 0xa7, 0xd7, 0xde, 0x35, 0x8b, 0xe4, 0x8f, 0x58, 0x98, 0x04, 0xfc,
- 0x48, 0x0a, 0xce, 0x42, 0x77, 0x55, 0x5d, 0x36, 0x75, 0x4a, 0xba, 0x12, 0xcd, 0xf2, 0x9c, 0x1f,
- 0xca, 0xb0, 0x52, 0x04, 0x92, 0x04, 0xea, 0x01, 0xeb, 0xf1, 0x40, 0x1d, 0x6d, 0x05, 0xaf, 0x6e,
- 0x3f, 0x16, 0x92, 0xdf, 0x4f, 0x7a, 0xdd, 0x4f, 0x95, 0xff, 0x90, 0xf9, 0xc2, 0xdd, 0x53, 0xd5,
- 0x1e, 0x4f, 0xec, 0x37, 0x9f, 0x47, 0xce, 0x74, 0xde, 0x75, 0x8f, 0x25, 0x92, 0x0b, 0x45, 0x21,
- 0xe4, 0x52, 0xf8, 0x7d, 0x9a, 0xed, 0x43, 0xde, 0x85, 0x46, 0x8a, 0x0c, 0xd2, 0xac, 0x8b, 0xf5,
- 0xc5, 0x96, 0x9a, 0xda, 0x82, 0xfd, 0x5d, 0xbc, 0x96, 0x34, 0x4f, 0x20, 0x87, 0x00, 0x43, 0x3f,
- 0x95, 0xf1, 0x40, 0xb0, 0x30, 0x35, 0x2b, 0x98, 0xfe, 0xea, 0x22, 0xfd, 0xa3, 0x20, 0x66, 0xf2,
- 0x20, 0x07, 0x20, 0x75, 0x92, 0x95, 0x2a, 0xe4, 0xd1, 0x82, 0xed, 0x7c, 0x0b, 0xab, 0x7b, 0xac,
- 0x3f, 0xe4, 0xde, 0xfc, 0xb2, 0x6f, 0x41, 0xe5, 0x36, 0x1f, 0x67, 0xa7, 0xd1, 0x98, 0x4d, 0x6c,
- 0xb5, 0xa4, 0xea, 0x47, 0x29, 0x22, 0xbf, 0x2f, 0xb9, 0xfa, 0x4a, 0x35, 0x75, 0x52, 0x3c, 0x80,
- 0x7d, 0x0c, 0xb9, 0x6b, 0xd9, 0x8e, 0x39, 0x94, 0xe6, 0x86, 0xf3, 0xd8, 0x80, 0xba, 0x06, 0x11,
- 0x3b, 0xd7, 0x65, 0xb5, 0x4d, 0xc5, 0x6d, 0xcd, 0x26, 0xb6, 0x76, 0xe4, 0x12, 0xbd, 0xa5, 0x25,
- 0x1a, 0xc5, 0x47, 0xb3, 0xe0, 0x91, 0xa7, 0xb5, 0xba, 0x03, 0x4d, 0x29, 0x58, 0x9f, 0x9f, 0xf8,
- 0x5e, 0x76, 0xe3, 0xf3, 0xeb, 0x89, 0xee, 0x9b, 0x1e, 0xf9, 0x00, 0x9a, 0x22, 0x6b, 0x27, 0x93,
- 0xee, 0xcd, 0x73, 0xd2, 0x7d, 0x3d, 0x1a, 0xbb, 0x2b, 0xb3, 0x89, 0x3d, 0x47, 0xd2, 0xb9, 0x45,
- 0xae, 0x00, 0xc1, 0xbe, 0x4e, 0x94, 0xe8, 0xa5, 0x92, 0x85, 0xc9, 0x49, 0xa8, 0x85, 0xa9, 0x42,
- 0xd7, 0x31, 0x72, 0x9c, 0x07, 0x3e, 0x4b, 0x3f, 0xae, 0x36, 0x2b, 0xeb, 0x55, 0xe7, 0x2f, 0x03,
- 0x1a, 0x99, 0xd4, 0x91, 0xcb, 0xf0, 0x02, 0x0e, 0xf5, 0x86, 0x9f, 0xb2, 0x5e, 0xc0, 0x3d, 0xec,
- 0xb2, 0x49, 0x4f, 0x3b, 0xc9, 0xeb, 0xb0, 0x7e, 0x34, 0x64, 0xc2, 0xf3, 0xa3, 0xc1, 0x1c, 0x58,
- 0x46, 0xe0, 0x39, 0x3f, 0xe9, 0x40, 0xfb, 0x38, 0x96, 0x2c, 0xc0, 0x40, 0x8a, 0xda, 0x50, 0xa3,
- 0x45, 0x17, 0xd9, 0x85, 0xcd, 0x4c, 0xd9, 0x8f, 0x92, 0xc0, 0x97, 0xf3, 0x8a, 0x55, 0xac, 0xb8,
- 0x34, 0x76, 0x36, 0xe7, 0x66, 0x24, 0xb9, 0xb8, 0xcb, 0x82, 0x4c, 0x95, 0x97, 0xc6, 0x9c, 0xfb,
- 0x50, 0x43, 0x39, 0x26, 0x0e, 0xac, 0xe0, 0xfe, 0xea, 0x21, 0xf1, 0xb9, 0x96, 0xc6, 0x1a, 0x3d,
- 0xe5, 0x23, 0x6f, 0xc1, 0xe6, 0x7e, 0x2a, 0xfd, 0x90, 0x49, 0xee, 0x1d, 0xa1, 0x6b, 0x2f, 0x1e,
- 0x45, 0xfa, 0x35, 0xae, 0x1e, 0x94, 0xe8, 0xd2, 0xa8, 0xfb, 0x12, 0x6c, 0xec, 0x61, 0xff, 0x2c,
- 0xf0, 0xe5, 0x38, 0x87, 0x38, 0xfb, 0xb0, 0x86, 0x8f, 0x96, 0x12, 0x5c, 0x3f, 0x95, 0x7e, 0x1f,
- 0x9b, 0x5e, 0x5a, 0x5f, 0x71, 0xa9, 0x2e, 0xaf, 0xee, 0xfc, 0x6c, 0x00, 0xd1, 0x57, 0xfe, 0xe0,
- 0xf8, 0xf8, 0x70, 0x7e, 0xed, 0x2f, 0x42, 0xab, 0xaf, 0xbc, 0x27, 0xf3, 0xcb, 0x4f, 0x9b, 0xe8,
- 0xf8, 0x84, 0x8f, 0x89, 0x0d, 0x6d, 0x2d, 0xf7, 0x27, 0xfd, 0xd8, 0xd3, 0x4f, 0x62, 0x8d, 0x82,
- 0x76, 0xed, 0xc5, 0x1e, 0x27, 0xef, 0x40, 0x63, 0x98, 0xe9, 0x6a, 0xfe, 0x55, 0x16, 0xbe, 0x8c,
- 0xc5, 0x76, 0x5a, 0x40, 0x69, 0x0e, 0x56, 0x8f, 0x6c, 0x2f, 0xf6, 0xc6, 0x78, 0x4a, 0x2b, 0x14,
- 0x6d, 0xe7, 0x3d, 0x58, 0x3f, 0x9b, 0xa0, 0x70, 0xd1, 0xfc, 0x49, 0xa3, 0x68, 0xab, 0xc7, 0x18,
- 0xf5, 0x01, 0xe9, 0xb4, 0xa8, 0x5e, 0xb8, 0xfb, 0x0f, 0x9f, 0x58, 0xa5, 0x47, 0x4f, 0xac, 0xd2,
- 0xb3, 0x27, 0x96, 0xf1, 0xdd, 0xd4, 0x32, 0x7e, 0x99, 0x5a, 0xc6, 0x83, 0xa9, 0x65, 0x3c, 0x9c,
- 0x5a, 0xc6, 0x9f, 0x53, 0xcb, 0xf8, 0x7b, 0x6a, 0x95, 0x9e, 0x4d, 0x2d, 0xe3, 0xc7, 0xa7, 0x56,
- 0xe9, 0xe1, 0x53, 0xab, 0xf4, 0xe8, 0xa9, 0x55, 0xfa, 0x66, 0x0d, 0xd9, 0x86, 0xbe, 0xe7, 0x05,
- 0xfc, 0x1e, 0x13, 0xbc, 0x57, 0xc7, 0x0f, 0xe5, 0xda, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xaa,
- 0xd5, 0x7b, 0x7d, 0x2a, 0x0a, 0x00, 0x00,
+ // 1413 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4b, 0x6f, 0x1b, 0x47,
+ 0x12, 0xe6, 0xf0, 0xcd, 0xa2, 0x4c, 0x71, 0x5b, 0xf2, 0x2e, 0xe5, 0x07, 0x47, 0x3b, 0xf0, 0x41,
+ 0x36, 0x6c, 0x6a, 0x57, 0xde, 0x5d, 0x2c, 0x16, 0x9b, 0x20, 0x1a, 0x99, 0x89, 0xe4, 0xf8, 0x21,
+ 0x37, 0x99, 0x07, 0x72, 0x11, 0x9a, 0x9c, 0x36, 0x39, 0xf1, 0xbc, 0x3c, 0xd3, 0xb4, 0xcd, 0x5b,
+ 0x90, 0x1f, 0x10, 0xe4, 0x18, 0xe4, 0x90, 0x73, 0x7e, 0x41, 0x80, 0xfc, 0x03, 0x1f, 0x9d, 0x9b,
+ 0xe3, 0x03, 0x13, 0xcb, 0x08, 0x10, 0xf0, 0xe4, 0x9f, 0x10, 0x74, 0xf5, 0x0c, 0x39, 0x92, 0x18,
+ 0xdb, 0x01, 0x72, 0x21, 0xab, 0xab, 0xbe, 0xaa, 0xfe, 0xaa, 0xba, 0xbb, 0x6a, 0xa0, 0xea, 0xfa,
+ 0x16, 0x77, 0x5a, 0x41, 0xe8, 0x0b, 0x9f, 0xc0, 0xfd, 0x11, 0x0f, 0xc7, 0x21, 0xf3, 0x06, 0xfc,
+ 0xcc, 0x95, 0x81, 0x2d, 0x86, 0xa3, 0x5e, 0xab, 0xef, 0xbb, 0x9b, 0x03, 0x7f, 0xe0, 0x6f, 0x22,
+ 0xa4, 0x37, 0xba, 0x8b, 0x2b, 0x5c, 0xa0, 0xa4, 0x5c, 0xcf, 0x34, 0x07, 0xbe, 0x3f, 0x70, 0xf8,
+ 0x1c, 0x65, 0x8d, 0x42, 0x26, 0x6c, 0xdf, 0x8b, 0xed, 0xff, 0x48, 0x87, 0x0b, 0xd9, 0x5d, 0xe6,
+ 0xb1, 0x4d, 0xd7, 0x76, 0xed, 0x70, 0x33, 0xb8, 0x37, 0x50, 0x52, 0xd0, 0x53, 0xff, 0xb1, 0xc7,
+ 0xda, 0xf1, 0x88, 0xcc, 0x1b, 0x2b, 0x93, 0xf1, 0x5d, 0x16, 0xce, 0xee, 0x87, 0xbe, 0xcb, 0xc5,
+ 0x90, 0x8f, 0x22, 0x2a, 0xf9, 0xde, 0x91, 0xcc, 0x29, 0xbf, 0x3f, 0xe2, 0x91, 0x20, 0x04, 0xf2,
+ 0x01, 0x13, 0xc3, 0x86, 0xb6, 0xae, 0x6d, 0x54, 0x28, 0xca, 0x64, 0x15, 0x0a, 0x91, 0x60, 0xa1,
+ 0x68, 0x64, 0xd7, 0xb5, 0x8d, 0x1c, 0x55, 0x0b, 0x52, 0x87, 0x1c, 0xf7, 0xac, 0x46, 0x0e, 0x75,
+ 0x52, 0x94, 0xbe, 0x91, 0xe0, 0x41, 0x23, 0x8f, 0x2a, 0x94, 0xc9, 0x5b, 0x50, 0x12, 0xb6, 0xcb,
+ 0xfd, 0x91, 0x68, 0x14, 0xd6, 0xb5, 0x8d, 0xea, 0xd6, 0x5a, 0x4b, 0x91, 0x6b, 0x25, 0xe4, 0x5a,
+ 0xd7, 0xe2, 0x74, 0xcd, 0xf2, 0xe3, 0x89, 0x9e, 0xf9, 0xea, 0x27, 0x5d, 0xa3, 0x89, 0x8f, 0xdc,
+ 0x1a, 0x0b, 0xdb, 0x28, 0x22, 0x1f, 0xb5, 0x20, 0x57, 0xa1, 0xe4, 0x07, 0xd2, 0x25, 0x6a, 0x94,
+ 0x30, 0xe8, 0x4a, 0x6b, 0x5e, 0xfe, 0xd6, 0x6d, 0x65, 0x32, 0xf3, 0x32, 0x1c, 0x4d, 0x90, 0xa4,
+ 0x06, 0x59, 0xdb, 0x6a, 0x94, 0x91, 0x5b, 0xd6, 0xb6, 0xc8, 0x15, 0x28, 0x0c, 0x6d, 0x4f, 0x44,
+ 0x8d, 0x0a, 0x86, 0xf8, 0x4b, 0x3a, 0xc4, 0xae, 0x34, 0x60, 0x00, 0x8d, 0x2a, 0x94, 0xf1, 0x83,
+ 0x06, 0xe7, 0xe7, 0x85, 0xdb, 0xf3, 0x22, 0xc1, 0x3c, 0xf1, 0xda, 0xd2, 0x11, 0xc8, 0xcb, 0x54,
+ 0xe2, 0xca, 0xa1, 0x3c, 0xcf, 0x29, 0xf7, 0x3b, 0x39, 0xe5, 0xff, 0x60, 0x4e, 0x85, 0x93, 0x39,
+ 0x15, 0xdf, 0x28, 0xa7, 0x3d, 0x38, 0x75, 0x83, 0xf5, 0xb8, 0x73, 0x93, 0x89, 0xfe, 0x90, 0x87,
+ 0x11, 0xf9, 0x2f, 0x80, 0xab, 0xe4, 0x0e, 0x17, 0x0d, 0x6d, 0x3d, 0xb7, 0x51, 0xdd, 0x6a, 0xa4,
+ 0x83, 0xa4, 0xe1, 0x34, 0x85, 0x35, 0xfa, 0xb0, 0x94, 0xb6, 0x91, 0x8b, 0x90, 0x17, 0xe3, 0x80,
+ 0x63, 0x31, 0x6a, 0x5b, 0xa7, 0xd3, 0x31, 0x10, 0xd2, 0x1d, 0x07, 0x9c, 0x22, 0x44, 0xd6, 0xc8,
+ 0x63, 0x71, 0x8d, 0x2a, 0x14, 0x65, 0x59, 0xa3, 0x07, 0xcc, 0x19, 0xf1, 0xa4, 0x46, 0xb8, 0x30,
+ 0xbe, 0xd6, 0x60, 0x7d, 0x7e, 0x06, 0xb8, 0xdf, 0x2d, 0xe6, 0xf2, 0xe8, 0x4f, 0xbb, 0xc1, 0x97,
+ 0xa0, 0xee, 0xa4, 0xb2, 0xe8, 0x70, 0x21, 0x4f, 0x23, 0xb7, 0x51, 0xa1, 0x27, 0xf4, 0xc7, 0x6b,
+ 0x6f, 0x7c, 0xaf, 0xc1, 0xdf, 0x8f, 0x91, 0xfb, 0x50, 0xb2, 0x7e, 0x3d, 0xbb, 0x73, 0x50, 0x71,
+ 0x92, 0x5c, 0xe2, 0x2a, 0xcc, 0x15, 0x73, 0xee, 0xb9, 0x05, 0xdc, 0xf3, 0xaf, 0xe6, 0x5e, 0x78,
+ 0x25, 0xf7, 0xe2, 0x8c, 0x7b, 0x17, 0x1a, 0xa9, 0xa6, 0xc0, 0xa3, 0xc0, 0xf7, 0x22, 0xbe, 0xcb,
+ 0x99, 0xc5, 0x43, 0xb2, 0x06, 0x79, 0x24, 0x86, 0x8c, 0xcd, 0xc2, 0x74, 0xa2, 0x6b, 0x57, 0x28,
+ 0xaa, 0xc8, 0x79, 0x28, 0xaa, 0x14, 0x1b, 0x59, 0xb9, 0x51, 0x62, 0x8c, 0x95, 0xc6, 0x8f, 0x59,
+ 0x20, 0x27, 0xc3, 0x12, 0x03, 0x8a, 0x1d, 0xc1, 0xc4, 0x28, 0x8a, 0x43, 0xc2, 0x74, 0xa2, 0x17,
+ 0x23, 0xd4, 0xd0, 0xd8, 0x42, 0x4c, 0xc8, 0x5f, 0x63, 0x82, 0x61, 0x35, 0xaa, 0x5b, 0x67, 0xd2,
+ 0xd7, 0x67, 0x1e, 0x51, 0x22, 0x4c, 0x32, 0x9d, 0xe8, 0x35, 0x8b, 0x09, 0x76, 0xd9, 0x77, 0x6d,
+ 0xc1, 0xdd, 0x40, 0x8c, 0x29, 0xfa, 0x92, 0x7f, 0x43, 0xa5, 0x1d, 0x86, 0x7e, 0x28, 0xaf, 0x9a,
+ 0xba, 0x47, 0xe6, 0xdf, 0xa6, 0x13, 0x7d, 0x85, 0x27, 0xca, 0x94, 0xc7, 0x1c, 0x49, 0x2e, 0x42,
+ 0x01, 0x17, 0x58, 0xdb, 0x8a, 0xb9, 0x32, 0x9d, 0xe8, 0xcb, 0xe8, 0x92, 0x82, 0x2b, 0x04, 0x69,
+ 0x43, 0x49, 0x15, 0x49, 0x55, 0xba, 0xba, 0x75, 0x61, 0x31, 0xd1, 0xa3, 0x15, 0x4d, 0xca, 0x94,
+ 0xf8, 0x92, 0x2d, 0x28, 0x7f, 0xc4, 0x42, 0xcf, 0xf6, 0x06, 0xf2, 0xe1, 0xca, 0x42, 0xfe, 0x75,
+ 0x3a, 0xd1, 0xc9, 0xc3, 0x58, 0x97, 0xda, 0x77, 0x86, 0x33, 0x3e, 0xd7, 0xa0, 0x76, 0xb4, 0x12,
+ 0xa4, 0x05, 0x40, 0x79, 0x34, 0x72, 0x44, 0x37, 0x79, 0x78, 0x15, 0xb3, 0x36, 0x9d, 0xe8, 0x10,
+ 0xce, 0xb4, 0x34, 0x85, 0x20, 0xef, 0x40, 0x51, 0xad, 0xf0, 0xf4, 0x8e, 0x3d, 0xf4, 0x0e, 0x73,
+ 0x03, 0x87, 0x77, 0x44, 0xc8, 0x99, 0x6b, 0xd6, 0x64, 0xd7, 0x91, 0xa7, 0xa4, 0x22, 0xd1, 0xd8,
+ 0xcf, 0xf8, 0x22, 0x0b, 0x4b, 0x69, 0x20, 0x09, 0xa0, 0x88, 0x77, 0x2d, 0x8a, 0x7b, 0xc7, 0x4a,
+ 0xab, 0xef, 0x87, 0x82, 0x3f, 0x0a, 0x7a, 0xaa, 0x73, 0xec, 0x33, 0x3b, 0x34, 0x77, 0x64, 0xb4,
+ 0x67, 0x13, 0xfd, 0x9f, 0x6f, 0x32, 0xd7, 0x94, 0xdf, 0xb6, 0xc5, 0x02, 0xc1, 0x43, 0x49, 0xc1,
+ 0xe5, 0x22, 0xb4, 0xfb, 0x34, 0xde, 0x87, 0xfc, 0x0f, 0x4a, 0x11, 0x32, 0x88, 0xe2, 0x2c, 0xea,
+ 0xf3, 0x2d, 0x15, 0xb5, 0x39, 0x7b, 0x6c, 0x22, 0x11, 0x4d, 0x1c, 0xc8, 0x3e, 0xc0, 0xd0, 0x8e,
+ 0x84, 0x3f, 0x08, 0x99, 0x1b, 0x35, 0x72, 0xe8, 0x7e, 0x6e, 0xee, 0xfe, 0xae, 0xe3, 0x33, 0xb1,
+ 0x9b, 0x00, 0x90, 0x3a, 0x89, 0x43, 0xa5, 0xfc, 0x68, 0x4a, 0x36, 0x3e, 0x85, 0xda, 0x0e, 0xeb,
+ 0x0f, 0xb9, 0x35, 0xbb, 0xec, 0x6b, 0x90, 0xbb, 0xc7, 0xc7, 0xf1, 0x69, 0x94, 0xa6, 0x13, 0x5d,
+ 0x2e, 0xa9, 0xfc, 0x91, 0xa3, 0x91, 0x3f, 0x12, 0x5c, 0xb6, 0x6b, 0x45, 0x9d, 0xa4, 0x0f, 0xa0,
+ 0x8d, 0x26, 0x73, 0x39, 0xde, 0x31, 0x81, 0xd2, 0x44, 0x30, 0x9e, 0x69, 0x50, 0x54, 0x20, 0xa2,
+ 0x27, 0x2d, 0x42, 0x6e, 0x93, 0x33, 0x2b, 0xd3, 0x89, 0xae, 0x14, 0x49, 0xb7, 0x58, 0x53, 0xdd,
+ 0x02, 0xbb, 0x9f, 0x62, 0xc1, 0x3d, 0x4b, 0xb5, 0x8d, 0x75, 0x28, 0x8b, 0x90, 0xf5, 0xf9, 0x81,
+ 0x6d, 0xc5, 0x37, 0x3e, 0xb9, 0x9e, 0xa8, 0xde, 0xb3, 0xc8, 0xdb, 0x50, 0x0e, 0xe3, 0x74, 0xe2,
+ 0x19, 0xbe, 0x7a, 0x62, 0x86, 0x6f, 0x7b, 0x63, 0x73, 0x69, 0x3a, 0xd1, 0x67, 0x48, 0x3a, 0x93,
+ 0xc8, 0x65, 0x20, 0x98, 0xd7, 0x81, 0x9c, 0x7e, 0x91, 0x60, 0x6e, 0x70, 0xe0, 0x46, 0x71, 0xf3,
+ 0xa9, 0xa3, 0xa5, 0x9b, 0x18, 0x6e, 0x46, 0xd7, 0xf3, 0xe5, 0x5c, 0x3d, 0x6f, 0xfc, 0xa2, 0x41,
+ 0x29, 0x9e, 0x79, 0xe4, 0x02, 0x9c, 0xc2, 0xa2, 0x5e, 0xb3, 0x23, 0xd6, 0x73, 0xb8, 0x85, 0x59,
+ 0x96, 0xe9, 0x51, 0xa5, 0x6c, 0x7f, 0x9d, 0x21, 0x0b, 0x2d, 0xdb, 0x1b, 0xcc, 0x80, 0x59, 0x04,
+ 0x9e, 0xd0, 0x93, 0x75, 0xa8, 0x76, 0x7d, 0xc1, 0x1c, 0x34, 0x44, 0xd8, 0x1b, 0x0a, 0x34, 0xad,
+ 0x22, 0x5b, 0xb0, 0x1a, 0x8f, 0xf8, 0x4e, 0xe0, 0xd8, 0x62, 0x16, 0x31, 0x8f, 0x11, 0x17, 0xda,
+ 0x8e, 0xfb, 0xec, 0x79, 0x82, 0x87, 0x0f, 0x98, 0x13, 0x8f, 0x88, 0x85, 0x36, 0xe3, 0x11, 0x14,
+ 0x70, 0x2e, 0x13, 0x03, 0x96, 0x70, 0x7f, 0x39, 0x2c, 0x6c, 0xae, 0x5a, 0x63, 0x81, 0x1e, 0xd1,
+ 0x91, 0x7f, 0xc1, 0x6a, 0x3b, 0x12, 0xb6, 0xcb, 0x04, 0xb7, 0x3a, 0xa8, 0xda, 0xf1, 0x47, 0x9e,
+ 0x1a, 0x6a, 0xf9, 0xdd, 0x0c, 0x5d, 0x68, 0x35, 0x4f, 0xc3, 0xca, 0x0e, 0xe6, 0xcf, 0x1c, 0x5b,
+ 0x8c, 0x13, 0x88, 0xd1, 0x86, 0x65, 0x1c, 0x4c, 0xb2, 0xe1, 0xda, 0x91, 0xb0, 0xfb, 0x98, 0xf4,
+ 0xc2, 0xf8, 0x92, 0x4b, 0x7e, 0x71, 0x74, 0xe3, 0x1b, 0x0d, 0x88, 0xba, 0xf2, 0xbb, 0xdd, 0xee,
+ 0xfe, 0xec, 0xda, 0x9f, 0x85, 0x4a, 0x5f, 0x6a, 0x0f, 0x66, 0x97, 0x9f, 0x96, 0x51, 0xf1, 0x3e,
+ 0x1f, 0x13, 0x1d, 0xaa, 0xaa, 0xdd, 0x1f, 0xf4, 0x7d, 0x4b, 0x4d, 0xbc, 0x02, 0x05, 0xa5, 0xda,
+ 0xf1, 0x2d, 0x4e, 0xfe, 0x03, 0xa5, 0x61, 0xdc, 0x57, 0x93, 0x57, 0x99, 0x7a, 0x19, 0xf3, 0xed,
+ 0x54, 0x03, 0xa5, 0x09, 0x58, 0x0e, 0xd7, 0x9e, 0x6f, 0x8d, 0xf1, 0x94, 0x96, 0x28, 0xca, 0xc6,
+ 0xff, 0xa1, 0x7e, 0xdc, 0x61, 0xf6, 0xc5, 0xa1, 0x2d, 0xfa, 0xe2, 0xc8, 0xa6, 0xbe, 0x38, 0x2e,
+ 0x5d, 0x87, 0xca, 0xec, 0x73, 0x85, 0x54, 0xa0, 0xd0, 0xbe, 0xf3, 0xc1, 0xf6, 0x8d, 0x7a, 0x86,
+ 0x9c, 0x82, 0xca, 0xad, 0xdb, 0xdd, 0x03, 0xb5, 0xd4, 0xc8, 0x32, 0x54, 0x69, 0xfb, 0xbd, 0xf6,
+ 0xc7, 0x07, 0x37, 0xb7, 0xbb, 0x3b, 0xbb, 0xf5, 0x2c, 0x21, 0x50, 0x53, 0x8a, 0x5b, 0xb7, 0x63,
+ 0x5d, 0xce, 0x6c, 0x3f, 0x79, 0xde, 0xcc, 0x3c, 0x7d, 0xde, 0xcc, 0xbc, 0x7c, 0xde, 0xd4, 0x3e,
+ 0x3b, 0x6c, 0x6a, 0xdf, 0x1e, 0x36, 0xb5, 0xc7, 0x87, 0x4d, 0xed, 0xc9, 0x61, 0x53, 0xfb, 0xf9,
+ 0xb0, 0xa9, 0xfd, 0x7a, 0xd8, 0xcc, 0xbc, 0x3c, 0x6c, 0x6a, 0x5f, 0xbe, 0x68, 0x66, 0x9e, 0xbc,
+ 0x68, 0x66, 0x9e, 0xbe, 0x68, 0x66, 0x3e, 0x59, 0xc6, 0xcc, 0x5d, 0xdb, 0xb2, 0x1c, 0xfe, 0x90,
+ 0x85, 0xbc, 0x57, 0xc4, 0x47, 0x77, 0xf5, 0xb7, 0x00, 0x00, 0x00, 0xff, 0xff, 0x10, 0x27, 0x47,
+ 0xda, 0x7f, 0x0c, 0x00, 0x00,
+}
+
+func (x MatchType) String() string {
+ s, ok := MatchType_name[int32(x)]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(x))
}
-
func (this *PrometheusRangeQueryRequest) Equal(that interface{}) bool {
if that == nil {
return this == nil
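The field comments on PrometheusLabelNamesQueryRequest above explain that LabelMatcherSets stays repeated so a labels query can be represented before the querier splits it into one query per matcher set. A small, hypothetical illustration of that shape (the path, timestamps and ID are example values only):

func exampleLabelNamesRequest() *PrometheusLabelNamesQueryRequest {
	// Mirrors ?match[]=up&match[]=process_start_time_seconds{job="prometheus"}
	// before it has been split into separate per-matcher-set queries.
	return &PrometheusLabelNamesQueryRequest{
		Path:  "/api/v1/labels", // illustrative request path
		Start: 1700000000000,    // illustrative millisecond timestamps
		End:   1700003600000,
		LabelMatcherSets: []string{
			`up`,
			`process_start_time_seconds{job="prometheus"}`,
		},
		Id: 1,
	}
}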
@@ -1113,14 +1435,14 @@ func (this *PrometheusInstantQueryRequest) Equal(that interface{}) bool {
}
return true
}
-func (this *PrometheusResponseHeader) Equal(that interface{}) bool {
+func (this *LabelMatchers) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
- that1, ok := that.(*PrometheusResponseHeader)
+ that1, ok := that.(*LabelMatchers)
if !ok {
- that2, ok := that.(PrometheusResponseHeader)
+ that2, ok := that.(LabelMatchers)
if ok {
that1 = &that2
} else {
@@ -1132,27 +1454,24 @@ func (this *PrometheusResponseHeader) Equal(that interface{}) bool {
} else if this == nil {
return false
}
- if this.Name != that1.Name {
- return false
- }
- if len(this.Values) != len(that1.Values) {
+ if len(this.MatcherSet) != len(that1.MatcherSet) {
return false
}
- for i := range this.Values {
- if this.Values[i] != that1.Values[i] {
+ for i := range this.MatcherSet {
+ if !this.MatcherSet[i].Equal(that1.MatcherSet[i]) {
return false
}
}
return true
}
-func (this *PrometheusResponse) Equal(that interface{}) bool {
+func (this *LabelMatcher) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
- that1, ok := that.(*PrometheusResponse)
+ that1, ok := that.(*LabelMatcher)
if !ok {
- that2, ok := that.(PrometheusResponse)
+ that2, ok := that.(LabelMatcher)
if ok {
that1 = &that2
} else {
@@ -1164,44 +1483,25 @@ func (this *PrometheusResponse) Equal(that interface{}) bool {
} else if this == nil {
return false
}
- if this.Status != that1.Status {
- return false
- }
- if !this.Data.Equal(that1.Data) {
- return false
- }
- if this.ErrorType != that1.ErrorType {
- return false
- }
- if this.Error != that1.Error {
+ if this.Type != that1.Type {
return false
}
- if len(this.Headers) != len(that1.Headers) {
+ if this.Name != that1.Name {
return false
}
- for i := range this.Headers {
- if !this.Headers[i].Equal(that1.Headers[i]) {
- return false
- }
- }
- if len(this.Warnings) != len(that1.Warnings) {
+ if this.Value != that1.Value {
return false
}
- for i := range this.Warnings {
- if this.Warnings[i] != that1.Warnings[i] {
- return false
- }
- }
return true
}
-func (this *PrometheusData) Equal(that interface{}) bool {
+func (this *PrometheusLabelNamesQueryRequest) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
- that1, ok := that.(*PrometheusData)
+ that1, ok := that.(*PrometheusLabelNamesQueryRequest)
if !ok {
- that2, ok := that.(PrometheusData)
+ that2, ok := that.(PrometheusLabelNamesQueryRequest)
if ok {
that1 = &that2
} else {
@@ -1213,20 +1513,186 @@ func (this *PrometheusData) Equal(that interface{}) bool {
} else if this == nil {
return false
}
- if this.ResultType != that1.ResultType {
+ if this.Path != that1.Path {
return false
}
- if len(this.Result) != len(that1.Result) {
+ if this.Start != that1.Start {
return false
}
- for i := range this.Result {
- if !this.Result[i].Equal(&that1.Result[i]) {
+ if this.End != that1.End {
+ return false
+ }
+ if len(this.LabelMatcherSets) != len(that1.LabelMatcherSets) {
+ return false
+ }
+ for i := range this.LabelMatcherSets {
+ if this.LabelMatcherSets[i] != that1.LabelMatcherSets[i] {
return false
}
}
+ if this.Id != that1.Id {
+ return false
+ }
return true
}
-func (this *SampleStream) Equal(that interface{}) bool {
+func (this *PrometheusLabelValuesQueryRequest) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*PrometheusLabelValuesQueryRequest)
+ if !ok {
+ that2, ok := that.(PrometheusLabelValuesQueryRequest)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Path != that1.Path {
+ return false
+ }
+ if this.LabelName != that1.LabelName {
+ return false
+ }
+ if this.Start != that1.Start {
+ return false
+ }
+ if this.End != that1.End {
+ return false
+ }
+ if len(this.LabelMatcherSets) != len(that1.LabelMatcherSets) {
+ return false
+ }
+ for i := range this.LabelMatcherSets {
+ if this.LabelMatcherSets[i] != that1.LabelMatcherSets[i] {
+ return false
+ }
+ }
+ if this.Id != that1.Id {
+ return false
+ }
+ return true
+}
+func (this *PrometheusResponseHeader) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*PrometheusResponseHeader)
+ if !ok {
+ that2, ok := that.(PrometheusResponseHeader)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Name != that1.Name {
+ return false
+ }
+ if len(this.Values) != len(that1.Values) {
+ return false
+ }
+ for i := range this.Values {
+ if this.Values[i] != that1.Values[i] {
+ return false
+ }
+ }
+ return true
+}
+func (this *PrometheusResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*PrometheusResponse)
+ if !ok {
+ that2, ok := that.(PrometheusResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Status != that1.Status {
+ return false
+ }
+ if !this.Data.Equal(that1.Data) {
+ return false
+ }
+ if this.ErrorType != that1.ErrorType {
+ return false
+ }
+ if this.Error != that1.Error {
+ return false
+ }
+ if len(this.Headers) != len(that1.Headers) {
+ return false
+ }
+ for i := range this.Headers {
+ if !this.Headers[i].Equal(that1.Headers[i]) {
+ return false
+ }
+ }
+ if len(this.Warnings) != len(that1.Warnings) {
+ return false
+ }
+ for i := range this.Warnings {
+ if this.Warnings[i] != that1.Warnings[i] {
+ return false
+ }
+ }
+ return true
+}
+func (this *PrometheusData) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*PrometheusData)
+ if !ok {
+ that2, ok := that.(PrometheusData)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.ResultType != that1.ResultType {
+ return false
+ }
+ if len(this.Result) != len(that1.Result) {
+ return false
+ }
+ for i := range this.Result {
+ if !this.Result[i].Equal(&that1.Result[i]) {
+ return false
+ }
+ }
+ return true
+}
+func (this *SampleStream) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
@@ -1558,6 +2024,59 @@ func (this *PrometheusInstantQueryRequest) GoString() string {
s = append(s, "}")
return strings.Join(s, "")
}
+func (this *LabelMatchers) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&querymiddleware.LabelMatchers{")
+ if this.MatcherSet != nil {
+ s = append(s, "MatcherSet: "+fmt.Sprintf("%#v", this.MatcherSet)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *LabelMatcher) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&querymiddleware.LabelMatcher{")
+ s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *PrometheusLabelNamesQueryRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&querymiddleware.PrometheusLabelNamesQueryRequest{")
+ s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
+ s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
+ s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n")
+ s = append(s, "LabelMatcherSets: "+fmt.Sprintf("%#v", this.LabelMatcherSets)+",\n")
+ s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *PrometheusLabelValuesQueryRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 10)
+ s = append(s, "&querymiddleware.PrometheusLabelValuesQueryRequest{")
+ s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
+ s = append(s, "LabelName: "+fmt.Sprintf("%#v", this.LabelName)+",\n")
+ s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
+ s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n")
+ s = append(s, "LabelMatcherSets: "+fmt.Sprintf("%#v", this.LabelMatcherSets)+",\n")
+ s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
func (this *PrometheusResponseHeader) GoString() string {
if this == nil {
return "nil"
@@ -1897,7 +2416,7 @@ func (m *PrometheusInstantQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int,
return len(dAtA) - i, nil
}
-func (m *PrometheusResponseHeader) Marshal() (dAtA []byte, err error) {
+func (m *LabelMatchers) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1907,36 +2426,76 @@ func (m *PrometheusResponseHeader) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *PrometheusResponseHeader) MarshalTo(dAtA []byte) (int, error) {
+func (m *LabelMatchers) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *PrometheusResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *LabelMatchers) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Values) > 0 {
- for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Values[iNdEx])
- copy(dAtA[i:], m.Values[iNdEx])
- i = encodeVarintModel(dAtA, i, uint64(len(m.Values[iNdEx])))
+ if len(m.MatcherSet) > 0 {
+ for iNdEx := len(m.MatcherSet) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.MatcherSet[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintModel(dAtA, i, uint64(size))
+ }
i--
- dAtA[i] = 0x12
+ dAtA[i] = 0xa
}
}
+ return len(dAtA) - i, nil
+}
+
+func (m *LabelMatcher) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LabelMatcher) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Value) > 0 {
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintModel(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x1a
+ }
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintModel(dAtA, i, uint64(len(m.Name)))
i--
- dAtA[i] = 0xa
+ dAtA[i] = 0x12
+ }
+ if m.Type != 0 {
+ i = encodeVarintModel(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
-func (m *PrometheusResponse) Marshal() (dAtA []byte, err error) {
+func (m *PrometheusLabelNamesQueryRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1946,28 +2505,182 @@ func (m *PrometheusResponse) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *PrometheusResponse) MarshalTo(dAtA []byte) (int, error) {
+func (m *PrometheusLabelNamesQueryRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *PrometheusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *PrometheusLabelNamesQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Warnings) > 0 {
- for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Warnings[iNdEx])
- copy(dAtA[i:], m.Warnings[iNdEx])
- i = encodeVarintModel(dAtA, i, uint64(len(m.Warnings[iNdEx])))
+ if m.Id != 0 {
+ i = encodeVarintModel(dAtA, i, uint64(m.Id))
+ i--
+ dAtA[i] = 0x28
+ }
+ if len(m.LabelMatcherSets) > 0 {
+ for iNdEx := len(m.LabelMatcherSets) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.LabelMatcherSets[iNdEx])
+ copy(dAtA[i:], m.LabelMatcherSets[iNdEx])
+ i = encodeVarintModel(dAtA, i, uint64(len(m.LabelMatcherSets[iNdEx])))
i--
- dAtA[i] = 0x32
+ dAtA[i] = 0x22
}
}
- if len(m.Headers) > 0 {
- for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- {
- {
+ if m.End != 0 {
+ i = encodeVarintModel(dAtA, i, uint64(m.End))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Start != 0 {
+ i = encodeVarintModel(dAtA, i, uint64(m.Start))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintModel(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PrometheusLabelValuesQueryRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PrometheusLabelValuesQueryRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PrometheusLabelValuesQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Id != 0 {
+ i = encodeVarintModel(dAtA, i, uint64(m.Id))
+ i--
+ dAtA[i] = 0x30
+ }
+ if len(m.LabelMatcherSets) > 0 {
+ for iNdEx := len(m.LabelMatcherSets) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.LabelMatcherSets[iNdEx])
+ copy(dAtA[i:], m.LabelMatcherSets[iNdEx])
+ i = encodeVarintModel(dAtA, i, uint64(len(m.LabelMatcherSets[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.End != 0 {
+ i = encodeVarintModel(dAtA, i, uint64(m.End))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.Start != 0 {
+ i = encodeVarintModel(dAtA, i, uint64(m.Start))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.LabelName) > 0 {
+ i -= len(m.LabelName)
+ copy(dAtA[i:], m.LabelName)
+ i = encodeVarintModel(dAtA, i, uint64(len(m.LabelName)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintModel(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PrometheusResponseHeader) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PrometheusResponseHeader) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PrometheusResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Values) > 0 {
+ for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Values[iNdEx])
+ copy(dAtA[i:], m.Values[iNdEx])
+ i = encodeVarintModel(dAtA, i, uint64(len(m.Values[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintModel(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PrometheusResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PrometheusResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PrometheusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Warnings) > 0 {
+ for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Warnings[iNdEx])
+ copy(dAtA[i:], m.Warnings[iNdEx])
+ i = encodeVarintModel(dAtA, i, uint64(len(m.Warnings[iNdEx])))
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if len(m.Headers) > 0 {
+ for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- {
+ {
size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
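In the MarshalToSizedBuffer hunks above, every hard-coded byte such as 0xa, 0x22 or 0x28 is the proto3 field key, computed as (field_number << 3) | wire_type, with wire type 0 for varints and 2 for length-delimited fields. For example, id is field 5 encoded as a varint, so 5<<3|0 = 0x28; labelMatcherSets is field 4 as a length-delimited string, so 4<<3|2 = 0x22; path is field 1, so 1<<3|2 = 0xa. A hypothetical helper that reproduces those keys for small field numbers:

// protoKey is illustrative only; it matches the constants in the generated
// marshal code for field numbers below 16 (larger numbers need a multi-byte key).
func protoKey(fieldNumber, wireType uint64) byte {
	return byte(fieldNumber<<3 | wireType)
}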
@@ -2534,6 +3247,101 @@ func (m *PrometheusInstantQueryRequest) Size() (n int) {
return n
}
+func (m *LabelMatchers) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.MatcherSet) > 0 {
+ for _, e := range m.MatcherSet {
+ l = e.Size()
+ n += 1 + l + sovModel(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LabelMatcher) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Type != 0 {
+ n += 1 + sovModel(uint64(m.Type))
+ }
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovModel(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovModel(uint64(l))
+ }
+ return n
+}
+
+func (m *PrometheusLabelNamesQueryRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovModel(uint64(l))
+ }
+ if m.Start != 0 {
+ n += 1 + sovModel(uint64(m.Start))
+ }
+ if m.End != 0 {
+ n += 1 + sovModel(uint64(m.End))
+ }
+ if len(m.LabelMatcherSets) > 0 {
+ for _, s := range m.LabelMatcherSets {
+ l = len(s)
+ n += 1 + l + sovModel(uint64(l))
+ }
+ }
+ if m.Id != 0 {
+ n += 1 + sovModel(uint64(m.Id))
+ }
+ return n
+}
+
+func (m *PrometheusLabelValuesQueryRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovModel(uint64(l))
+ }
+ l = len(m.LabelName)
+ if l > 0 {
+ n += 1 + l + sovModel(uint64(l))
+ }
+ if m.Start != 0 {
+ n += 1 + sovModel(uint64(m.Start))
+ }
+ if m.End != 0 {
+ n += 1 + sovModel(uint64(m.End))
+ }
+ if len(m.LabelMatcherSets) > 0 {
+ for _, s := range m.LabelMatcherSets {
+ l = len(s)
+ n += 1 + l + sovModel(uint64(l))
+ }
+ }
+ if m.Id != 0 {
+ n += 1 + sovModel(uint64(m.Id))
+ }
+ return n
+}
+
func (m *PrometheusResponseHeader) Size() (n int) {
if m == nil {
return 0
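In the Size() hunks above, the recurring pattern n += 1 + l + sovModel(uint64(l)) counts one key byte, the varint length prefix (sovModel reports how many bytes a varint needs), and the l payload bytes, while scalar fields use 1 + sovModel(value). A hypothetical equivalent of that varint-size calculation, for reference only:

// varintSize is illustrative; the generated file uses its own sovModel helper.
func varintSize(x uint64) int {
	n := 1
	for x >= 0x80 {
		x >>= 7
		n++
	}
	return n
}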
@@ -2823,54 +3631,110 @@ func (this *PrometheusInstantQueryRequest) String() string {
}, "")
return s
}
-func (this *PrometheusResponseHeader) String() string {
+func (this *LabelMatchers) String() string {
if this == nil {
return "nil"
}
- s := strings.Join([]string{`&PrometheusResponseHeader{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+ repeatedStringForMatcherSet := "[]*LabelMatcher{"
+ for _, f := range this.MatcherSet {
+ repeatedStringForMatcherSet += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + ","
+ }
+ repeatedStringForMatcherSet += "}"
+ s := strings.Join([]string{`&LabelMatchers{`,
+ `MatcherSet:` + repeatedStringForMatcherSet + `,`,
`}`,
}, "")
return s
}
-func (this *PrometheusResponse) String() string {
+func (this *LabelMatcher) String() string {
if this == nil {
return "nil"
}
- repeatedStringForHeaders := "[]*PrometheusResponseHeader{"
- for _, f := range this.Headers {
- repeatedStringForHeaders += strings.Replace(f.String(), "PrometheusResponseHeader", "PrometheusResponseHeader", 1) + ","
- }
- repeatedStringForHeaders += "}"
- s := strings.Join([]string{`&PrometheusResponse{`,
- `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
- `Data:` + strings.Replace(this.Data.String(), "PrometheusData", "PrometheusData", 1) + `,`,
- `ErrorType:` + fmt.Sprintf("%v", this.ErrorType) + `,`,
- `Error:` + fmt.Sprintf("%v", this.Error) + `,`,
- `Headers:` + repeatedStringForHeaders + `,`,
- `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`,
+ s := strings.Join([]string{`&LabelMatcher{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
`}`,
}, "")
return s
}
-func (this *PrometheusData) String() string {
+func (this *PrometheusLabelNamesQueryRequest) String() string {
if this == nil {
return "nil"
}
- repeatedStringForResult := "[]SampleStream{"
- for _, f := range this.Result {
- repeatedStringForResult += strings.Replace(strings.Replace(f.String(), "SampleStream", "SampleStream", 1), `&`, ``, 1) + ","
- }
- repeatedStringForResult += "}"
- s := strings.Join([]string{`&PrometheusData{`,
- `ResultType:` + fmt.Sprintf("%v", this.ResultType) + `,`,
- `Result:` + repeatedStringForResult + `,`,
+ s := strings.Join([]string{`&PrometheusLabelNamesQueryRequest{`,
+ `Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+ `Start:` + fmt.Sprintf("%v", this.Start) + `,`,
+ `End:` + fmt.Sprintf("%v", this.End) + `,`,
+ `LabelMatcherSets:` + fmt.Sprintf("%v", this.LabelMatcherSets) + `,`,
+ `Id:` + fmt.Sprintf("%v", this.Id) + `,`,
`}`,
}, "")
return s
}
-func (this *SampleStream) String() string {
+func (this *PrometheusLabelValuesQueryRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PrometheusLabelValuesQueryRequest{`,
+ `Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+ `LabelName:` + fmt.Sprintf("%v", this.LabelName) + `,`,
+ `Start:` + fmt.Sprintf("%v", this.Start) + `,`,
+ `End:` + fmt.Sprintf("%v", this.End) + `,`,
+ `LabelMatcherSets:` + fmt.Sprintf("%v", this.LabelMatcherSets) + `,`,
+ `Id:` + fmt.Sprintf("%v", this.Id) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PrometheusResponseHeader) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PrometheusResponseHeader{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PrometheusResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForHeaders := "[]*PrometheusResponseHeader{"
+ for _, f := range this.Headers {
+ repeatedStringForHeaders += strings.Replace(f.String(), "PrometheusResponseHeader", "PrometheusResponseHeader", 1) + ","
+ }
+ repeatedStringForHeaders += "}"
+ s := strings.Join([]string{`&PrometheusResponse{`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `Data:` + strings.Replace(this.Data.String(), "PrometheusData", "PrometheusData", 1) + `,`,
+ `ErrorType:` + fmt.Sprintf("%v", this.ErrorType) + `,`,
+ `Error:` + fmt.Sprintf("%v", this.Error) + `,`,
+ `Headers:` + repeatedStringForHeaders + `,`,
+ `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PrometheusData) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForResult := "[]SampleStream{"
+ for _, f := range this.Result {
+ repeatedStringForResult += strings.Replace(strings.Replace(f.String(), "SampleStream", "SampleStream", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForResult += "}"
+ s := strings.Join([]string{`&PrometheusData{`,
+ `ResultType:` + fmt.Sprintf("%v", this.ResultType) + `,`,
+ `Result:` + repeatedStringForResult + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SampleStream) String() string {
if this == nil {
return "nil"
}
@@ -3523,6 +4387,609 @@ func (m *PrometheusInstantQueryRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *LabelMatchers) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelMatchers: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelMatchers: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatcherSet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthModel
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthModel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MatcherSet = append(m.MatcherSet, &LabelMatcher{})
+ if err := m.MatcherSet[len(m.MatcherSet)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipModel(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthModel
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthModel
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelMatcher: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelMatcher: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= MatchType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthModel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthModel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthModel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthModel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipModel(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthModel
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthModel
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PrometheusLabelNamesQueryRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PrometheusLabelNamesQueryRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PrometheusLabelNamesQueryRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthModel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthModel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
+ }
+ m.Start = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Start |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+ }
+ m.End = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.End |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LabelMatcherSets", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthModel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthModel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LabelMatcherSets = append(m.LabelMatcherSets, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
+ }
+ m.Id = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Id |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipModel(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthModel
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthModel
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PrometheusLabelValuesQueryRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PrometheusLabelValuesQueryRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PrometheusLabelValuesQueryRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthModel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthModel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LabelName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthModel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthModel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LabelName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
+ }
+ m.Start = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Start |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+ }
+ m.End = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.End |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LabelMatcherSets", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthModel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthModel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LabelMatcherSets = append(m.LabelMatcherSets, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
+ }
+ m.Id = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowModel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Id |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipModel(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthModel
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthModel
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *PrometheusResponseHeader) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/pkg/frontend/querymiddleware/model.proto b/pkg/frontend/querymiddleware/model.proto
index effbb6201d3..f3a20101bb8 100644
--- a/pkg/frontend/querymiddleware/model.proto
+++ b/pkg/frontend/querymiddleware/model.proto
@@ -27,7 +27,7 @@ message PrometheusRangeQueryRequest {
string query = 6;
Options options = 7 [(gogoproto.nullable) = false];
- // ID of the request used by splitAndCacheMiddleware to correlate downstream requests and responses.
+ // ID of the request used to correlate downstream requests and responses.
int64 id = 8;
// Hints that could be optionally attached to the request to pass down the stack.
@@ -41,7 +41,7 @@ message PrometheusInstantQueryRequest {
string query = 3;
Options options = 4 [(gogoproto.nullable) = false];
- // ID of the request used by splitAndCacheMiddleware to correlate downstream requests and responses.
+ // ID of the request used to correlate downstream requests and responses.
int64 id = 5;
// Hints that could be optionally attached to the request to pass down the stack.
@@ -49,6 +49,57 @@ message PrometheusInstantQueryRequest {
Hints hints = 6 [(gogoproto.nullable) = true];
}
+message LabelMatchers {
+ repeated LabelMatcher matcherSet = 1;
+}
+
+enum MatchType {
+ EQUAL = 0;
+ NOT_EQUAL = 1;
+ REGEX_MATCH = 2;
+ REGEX_NO_MATCH = 3;
+}
+
+message LabelMatcher {
+ MatchType type = 1;
+ string name = 2;
+ string value = 3;
+}
+
+message PrometheusLabelNamesQueryRequest {
+ string path = 1;
+
+ int64 start = 2;
+ int64 end = 3;
+
+ // labelMatcherSets is a repeated field here in order to enable the representation
+ // of labels queries which have not yet been split; the Prometheus querier code
+ // will eventually split requests like `?match[]=up&match[]=process_start_time_seconds{job="prometheus"}`
+ // into separate queries, one for each matcher set.
+ repeated string labelMatcherSets = 4;
+
+ // ID of the request used to correlate downstream requests and responses.
+ int64 id = 5;
+}
+
+message PrometheusLabelValuesQueryRequest {
+ string path = 1;
+
+ string labelName = 2;
+
+ int64 start = 3;
+ int64 end = 4;
+
+ // labelMatcherSets is a repeated field here in order to enable the representation
+ // of labels queries which have not yet been split; the Prometheus querier code
+ // will eventually split requests like `?match[]=up&match[]=process_start_time_seconds{job="prometheus"}`
+ // into separate queries, one for each matcher set.
+ repeated string labelMatcherSets = 5;
+
+ // ID of the request used to correlate downstream requests and responses.
+ int64 id = 6;
+}
+
message PrometheusResponseHeader {
string Name = 1 [(gogoproto.jsontag) = "-"];
repeated string Values = 2 [(gogoproto.jsontag) = "-"];
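The two new request messages mirror the parameters of the Prometheus label names and label values endpoints. As a rough sketch (field names follow the generated Go struct used in the Unmarshal code above; the path and timestamps are hypothetical), a not-yet-split label names request carrying two match[] selector sets could look like this:

	req := &PrometheusLabelNamesQueryRequest{
		Path:  "/api/v1/labels",    // hypothetical endpoint path
		Start: 1_700_000_000_000,   // Unix milliseconds
		End:   1_700_003_600_000,
		LabelMatcherSets: []string{
			`up`,
			`process_start_time_seconds{job="prometheus"}`,
		},
		Id: 1,
	}
	_ = req // each LabelMatcherSets entry is later split into its own downstream query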
diff --git a/pkg/frontend/querymiddleware/model_extra.go b/pkg/frontend/querymiddleware/model_extra.go
index 42af4a252a0..d2b5b50ff1c 100644
--- a/pkg/frontend/querymiddleware/model_extra.go
+++ b/pkg/frontend/querymiddleware/model_extra.go
@@ -18,6 +18,7 @@ import (
"github.com/opentracing/opentracing-go"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/timestamp"
+ v1 "github.com/prometheus/prometheus/web/api/v1"
"github.com/grafana/mimir/pkg/mimirpb"
)
@@ -42,14 +43,14 @@ func newEmptyPrometheusResponse() *PrometheusResponse {
}
// WithID clones the current `PrometheusRangeQueryRequest` with the provided ID.
-func (q *PrometheusRangeQueryRequest) WithID(id int64) Request {
+func (q *PrometheusRangeQueryRequest) WithID(id int64) MetricsQueryRequest {
newRequest := *q
newRequest.Id = id
return &newRequest
}
// WithStartEnd clones the current `PrometheusRangeQueryRequest` with a new `start` and `end` timestamp.
-func (q *PrometheusRangeQueryRequest) WithStartEnd(start int64, end int64) Request {
+func (q *PrometheusRangeQueryRequest) WithStartEnd(start int64, end int64) MetricsQueryRequest {
newRequest := *q
newRequest.Start = start
newRequest.End = end
@@ -57,7 +58,7 @@ func (q *PrometheusRangeQueryRequest) WithStartEnd(start int64, end int64) Reque
}
// WithQuery clones the current `PrometheusRangeQueryRequest` with a new query.
-func (q *PrometheusRangeQueryRequest) WithQuery(query string) Request {
+func (q *PrometheusRangeQueryRequest) WithQuery(query string) MetricsQueryRequest {
newRequest := *q
newRequest.Query = query
return &newRequest
@@ -65,7 +66,7 @@ func (q *PrometheusRangeQueryRequest) WithQuery(query string) Request {
// WithTotalQueriesHint clones the current `PrometheusRangeQueryRequest` with an
// added Hint value for TotalQueries.
-func (q *PrometheusRangeQueryRequest) WithTotalQueriesHint(totalQueries int32) Request {
+func (q *PrometheusRangeQueryRequest) WithTotalQueriesHint(totalQueries int32) MetricsQueryRequest {
newRequest := *q
if newRequest.Hints == nil {
newRequest.Hints = &Hints{TotalQueries: totalQueries}
@@ -78,7 +79,7 @@ func (q *PrometheusRangeQueryRequest) WithTotalQueriesHint(totalQueries int32) R
// WithEstimatedSeriesCountHint clones the current `PrometheusRangeQueryRequest`
// with an added Hint value for EstimatedCardinality.
-func (q *PrometheusRangeQueryRequest) WithEstimatedSeriesCountHint(count uint64) Request {
+func (q *PrometheusRangeQueryRequest) WithEstimatedSeriesCountHint(count uint64) MetricsQueryRequest {
newRequest := *q
if newRequest.Hints == nil {
newRequest.Hints = &Hints{
@@ -112,25 +113,25 @@ func (r *PrometheusInstantQueryRequest) GetStep() int64 {
return 0
}
-func (r *PrometheusInstantQueryRequest) WithID(id int64) Request {
+func (r *PrometheusInstantQueryRequest) WithID(id int64) MetricsQueryRequest {
newRequest := *r
newRequest.Id = id
return &newRequest
}
-func (r *PrometheusInstantQueryRequest) WithStartEnd(startTime int64, _ int64) Request {
+func (r *PrometheusInstantQueryRequest) WithStartEnd(startTime int64, _ int64) MetricsQueryRequest {
newRequest := *r
newRequest.Time = startTime
return &newRequest
}
-func (r *PrometheusInstantQueryRequest) WithQuery(s string) Request {
+func (r *PrometheusInstantQueryRequest) WithQuery(s string) MetricsQueryRequest {
newRequest := *r
newRequest.Query = s
return &newRequest
}
-func (r *PrometheusInstantQueryRequest) WithTotalQueriesHint(totalQueries int32) Request {
+func (r *PrometheusInstantQueryRequest) WithTotalQueriesHint(totalQueries int32) MetricsQueryRequest {
newRequest := *r
if newRequest.Hints == nil {
newRequest.Hints = &Hints{TotalQueries: totalQueries}
@@ -141,7 +142,7 @@ func (r *PrometheusInstantQueryRequest) WithTotalQueriesHint(totalQueries int32)
return &newRequest
}
-func (r *PrometheusInstantQueryRequest) WithEstimatedSeriesCountHint(count uint64) Request {
+func (r *PrometheusInstantQueryRequest) WithEstimatedSeriesCountHint(count uint64) MetricsQueryRequest {
newRequest := *r
if newRequest.Hints == nil {
newRequest.Hints = &Hints{
@@ -161,6 +162,55 @@ func (r *PrometheusInstantQueryRequest) AddSpanTags(sp opentracing.Span) {
sp.SetTag("time", timestamp.Time(r.GetTime()).String())
}
+func (r *PrometheusLabelNamesQueryRequest) GetLabelName() string {
+ return ""
+}
+
+func (r *PrometheusLabelNamesQueryRequest) GetStartOrDefault() int64 {
+ if r.GetStart() == 0 {
+ return v1.MinTime.UnixMilli()
+ }
+ return r.GetStart()
+}
+
+func (r *PrometheusLabelNamesQueryRequest) GetEndOrDefault() int64 {
+ if r.GetEnd() == 0 {
+ return v1.MaxTime.UnixMilli()
+ }
+ return r.GetEnd()
+}
+
+func (r *PrometheusLabelValuesQueryRequest) GetStartOrDefault() int64 {
+ if r.GetStart() == 0 {
+ return v1.MinTime.UnixMilli()
+ }
+ return r.GetStart()
+}
+
+func (r *PrometheusLabelValuesQueryRequest) GetEndOrDefault() int64 {
+ if r.GetEnd() == 0 {
+ return v1.MaxTime.UnixMilli()
+ }
+ return r.GetEnd()
+}
+
+// AddSpanTags writes query information about the current `PrometheusLabelNamesQueryRequest`
+// to a span's tags ("attributes" in OpenTelemetry parlance).
+func (r *PrometheusLabelNamesQueryRequest) AddSpanTags(sp opentracing.Span) {
+ sp.SetTag("matchers", fmt.Sprintf("%v", r.GetLabelMatcherSets()))
+ sp.SetTag("start", timestamp.Time(r.GetStart()).String())
+ sp.SetTag("end", timestamp.Time(r.GetEnd()).String())
+}
+
+// AddSpanTags writes query information about the current `PrometheusLabelValuesQueryRequest`
+// to a span's tags ("attributes" in OpenTelemetry parlance).
+func (r *PrometheusLabelValuesQueryRequest) AddSpanTags(sp opentracing.Span) {
+ sp.SetTag("label", fmt.Sprintf("%v", r.GetLabelName()))
+ sp.SetTag("matchers", fmt.Sprintf("%v", r.GetLabelMatcherSets()))
+ sp.SetTag("start", timestamp.Time(r.GetStart()).String())
+ sp.SetTag("end", timestamp.Time(r.GetEnd()).String())
+}
+
func (d *PrometheusData) UnmarshalJSON(b []byte) error {
v := struct {
Type model.ValueType `json:"resultType"`
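The *OrDefault accessors added above let callers treat an omitted start or end as the Prometheus API's open-ended bounds. A small in-package sketch of the behaviour (values hypothetical):

	r := &PrometheusLabelValuesQueryRequest{LabelName: "job"} // Start and End left at their zero values
	minT := r.GetStartOrDefault()                             // falls back to v1.MinTime.UnixMilli()
	maxT := r.GetEndOrDefault()                               // falls back to v1.MaxTime.UnixMilli()
	_, _ = minT, maxT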
diff --git a/pkg/frontend/querymiddleware/querysharding.go b/pkg/frontend/querymiddleware/querysharding.go
index 53ba265db3f..9f071aec107 100644
--- a/pkg/frontend/querymiddleware/querysharding.go
+++ b/pkg/frontend/querymiddleware/querysharding.go
@@ -38,7 +38,7 @@ type querySharding struct {
limit Limits
engine *promql.Engine
- next Handler
+ next MetricsQueryHandler
logger log.Logger
maxSeriesPerShard uint64
@@ -64,7 +64,7 @@ func newQueryShardingMiddleware(
limit Limits,
maxSeriesPerShard uint64,
registerer prometheus.Registerer,
-) Middleware {
+) MetricsQueryMiddleware {
metrics := queryShardingMetrics{
shardingAttempts: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
Name: "cortex_frontend_query_sharding_rewrites_attempted_total",
@@ -84,7 +84,7 @@ func newQueryShardingMiddleware(
Buckets: prometheus.ExponentialBuckets(2, 2, 10),
}),
}
- return MiddlewareFunc(func(next Handler) Handler {
+ return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
return &querySharding{
next: next,
queryShardingMetrics: metrics,
@@ -96,7 +96,7 @@ func newQueryShardingMiddleware(
})
}
-func (s *querySharding) Do(ctx context.Context, r Request) (Response, error) {
+func (s *querySharding) Do(ctx context.Context, r MetricsQueryRequest) (Response, error) {
log := spanlogger.FromContext(ctx, s.logger)
tenantIDs, err := tenant.TenantIDs(ctx)
@@ -171,7 +171,7 @@ func (s *querySharding) Do(ctx context.Context, r Request) (Response, error) {
}, nil
}
-func newQuery(ctx context.Context, r Request, engine *promql.Engine, queryable storage.Queryable) (promql.Query, error) {
+func newQuery(ctx context.Context, r MetricsQueryRequest, engine *promql.Engine, queryable storage.Queryable) (promql.Query, error) {
switch r := r.(type) {
case *PrometheusRangeQueryRequest:
return engine.NewRangeQuery(
@@ -255,7 +255,7 @@ func (s *querySharding) shardQuery(ctx context.Context, query string, totalShard
}
// getShardsForQuery calculates and return the number of shards that should be used to run the query.
-func (s *querySharding) getShardsForQuery(ctx context.Context, tenantIDs []string, r Request, queryExpr parser.Expr, spanLog *spanlogger.SpanLogger) int {
+func (s *querySharding) getShardsForQuery(ctx context.Context, tenantIDs []string, r MetricsQueryRequest, queryExpr parser.Expr, spanLog *spanlogger.SpanLogger) int {
// Check if sharding is disabled for the given request.
if r.GetOptions().ShardingDisabled {
return 1
diff --git a/pkg/frontend/querymiddleware/querysharding_test.go b/pkg/frontend/querymiddleware/querysharding_test.go
index 22786eeb25d..7231c493be5 100644
--- a/pkg/frontend/querymiddleware/querysharding_test.go
+++ b/pkg/frontend/querymiddleware/querysharding_test.go
@@ -50,8 +50,8 @@ var (
lookbackDelta = 5 * time.Minute
)
-func mockHandlerWith(resp *PrometheusResponse, err error) Handler {
- return HandlerFunc(func(ctx context.Context, req Request) (Response, error) {
+func mockHandlerWith(resp *PrometheusResponse, err error) MetricsQueryHandler {
+ return HandlerFunc(func(ctx context.Context, _ MetricsQueryRequest) (Response, error) {
if expired := ctx.Err(); expired != nil {
return nil, expired
}
@@ -681,7 +681,7 @@ func TestQuerySharding_Correctness(t *testing.T) {
t.Run(testName, func(t *testing.T) {
t.Parallel()
- reqs := []Request{
+ reqs := []MetricsQueryRequest{
&PrometheusInstantQueryRequest{
Path: "/query",
Time: util.TimeToMillis(end),
@@ -1356,7 +1356,7 @@ func TestQuerySharding_ShouldSupportMaxShardedQueries(t *testing.T) {
ResultType: string(parser.ValueTypeVector),
},
}, nil).Run(func(args mock.Arguments) {
- req := args[1].(Request)
+ req := args[1].(MetricsQueryRequest)
reqShard := regexp.MustCompile(`__query_shard__="[^"]+"`).FindString(req.GetQuery())
uniqueShardsMx.Lock()
@@ -1449,7 +1449,7 @@ func TestQuerySharding_ShouldSupportMaxRegexpSizeBytes(t *testing.T) {
ResultType: string(parser.ValueTypeVector),
},
}, nil).Run(func(args mock.Arguments) {
- req := args[1].(Request)
+ req := args[1].(MetricsQueryRequest)
reqShard := regexp.MustCompile(`__query_shard__="[^"]+"`).FindString(req.GetQuery())
uniqueShardsMx.Lock()
@@ -1499,7 +1499,7 @@ func TestQuerySharding_ShouldReturnErrorInCorrectFormat(t *testing.T) {
LookbackDelta: lookbackDelta,
EnableAtModifier: true,
EnableNegativeOffset: true,
- NoStepSubqueryIntervalFn: func(rangeMillis int64) int64 {
+ NoStepSubqueryIntervalFn: func(int64) int64 {
return int64(1 * time.Minute / (time.Millisecond / time.Nanosecond))
},
})
@@ -1512,14 +1512,14 @@ func TestQuerySharding_ShouldReturnErrorInCorrectFormat(t *testing.T) {
LookbackDelta: lookbackDelta,
EnableAtModifier: true,
EnableNegativeOffset: true,
- NoStepSubqueryIntervalFn: func(rangeMillis int64) int64 {
+ NoStepSubqueryIntervalFn: func(int64) int64 {
return int64(1 * time.Minute / (time.Millisecond / time.Nanosecond))
},
})
- queryableInternalErr = storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) {
+ queryableInternalErr = storage.QueryableFunc(func(int64, int64) (storage.Querier, error) {
return nil, apierror.New(apierror.TypeInternal, "some internal error")
})
- queryablePrometheusExecErr = storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) {
+ queryablePrometheusExecErr = storage.QueryableFunc(func(int64, int64) (storage.Querier, error) {
return nil, apierror.Newf(apierror.TypeExec, "expanding series: %s", querier.NewMaxQueryLengthError(744*time.Hour, 720*time.Hour))
})
queryable = storageSeriesQueryable([]*promql.StorageSeries{
@@ -1633,7 +1633,7 @@ func TestQuerySharding_EngineErrorMapping(t *testing.T) {
series = append(series, newSeries(newTestCounterLabels(i), start.Add(-lookbackDelta), end, step, factor(float64(i)*0.1)))
}
- queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) {
+ queryable := storage.QueryableFunc(func(int64, int64) (storage.Querier, error) {
return &querierMock{series: series}, nil
})
@@ -1681,7 +1681,7 @@ func TestQuerySharding_ShouldUseCardinalityEstimate(t *testing.T) {
tests := []struct {
name string
- req Request
+ req MetricsQueryRequest
expectedCalls int
}{
{
@@ -2147,7 +2147,7 @@ type downstreamHandler struct {
queryable storage.Queryable
}
-func (h *downstreamHandler) Do(ctx context.Context, r Request) (Response, error) {
+func (h *downstreamHandler) Do(ctx context.Context, r MetricsQueryRequest) (Response, error) {
qry, err := newQuery(ctx, r, h.engine, h.queryable)
if err != nil {
return nil, err
@@ -2174,7 +2174,7 @@ func (h *downstreamHandler) Do(ctx context.Context, r Request) (Response, error)
}
func storageSeriesQueryable(series []*promql.StorageSeries) storage.Queryable {
- return storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) {
+ return storage.QueryableFunc(func(int64, int64) (storage.Querier, error) {
return &querierMock{series: series}, nil
})
}
@@ -2391,7 +2391,7 @@ func stale(from, to time.Time, wrap generator) generator {
// constant returns a generator that generates a constant value
func constant(value float64) generator {
- return func(ts int64) float64 {
+ return func(int64) float64 {
return value
}
}
@@ -2440,7 +2440,7 @@ func newEngine() *promql.Engine {
LookbackDelta: lookbackDelta,
EnableAtModifier: true,
EnableNegativeOffset: true,
- NoStepSubqueryIntervalFn: func(rangeMillis int64) int64 {
+ NoStepSubqueryIntervalFn: func(int64) int64 {
return int64(1 * time.Minute / (time.Millisecond / time.Nanosecond))
},
})
diff --git a/pkg/frontend/querymiddleware/results_cache.go b/pkg/frontend/querymiddleware/results_cache.go
index 9cdabc5a020..6525063293e 100644
--- a/pkg/frontend/querymiddleware/results_cache.go
+++ b/pkg/frontend/querymiddleware/results_cache.go
@@ -11,7 +11,7 @@ import (
"flag"
"fmt"
"hash/fnv"
- "net/url"
+ "net/http"
"sort"
"strings"
"time"
@@ -186,30 +186,38 @@ var ErrUnsupportedRequest = errors.New("request is not cacheable")
// CacheKeyGenerator generates cache keys. This is a useful interface for downstream
// consumers who wish to implement their own strategies.
type CacheKeyGenerator interface {
- // QueryRequest should generate a cache key based on the tenant ID and Request.
- QueryRequest(ctx context.Context, tenantID string, r Request) string
+ // QueryRequest should generate a cache key based on the tenant ID and MetricsQueryRequest.
+ QueryRequest(ctx context.Context, tenantID string, r MetricsQueryRequest) string
// LabelValues should return a cache key for a label values request. The cache key does not need to contain the tenant ID.
// LabelValues can return ErrUnsupportedRequest, in which case the response won't be treated as an error, but the item will still not be cached.
// LabelValues should return a nil *GenericQueryCacheKey when it returns an error and
// should always return non-nil *GenericQueryCacheKey when the returned error is nil.
- LabelValues(ctx context.Context, path string, values url.Values) (*GenericQueryCacheKey, error)
+ LabelValues(r *http.Request) (*GenericQueryCacheKey, error)
// LabelValuesCardinality should return a cache key for a label values cardinality request. The cache key does not need to contain the tenant ID.
// LabelValuesCardinality can return ErrUnsupportedRequest, in which case the response won't be treated as an error, but the item will still not be cached.
// LabelValuesCardinality should return a nil *GenericQueryCacheKey when it returns an error and
// should always return non-nil *GenericQueryCacheKey when the returned error is nil.
- LabelValuesCardinality(ctx context.Context, path string, values url.Values) (*GenericQueryCacheKey, error)
+ LabelValuesCardinality(r *http.Request) (*GenericQueryCacheKey, error)
}
type DefaultCacheKeyGenerator struct {
- // Interval is a constant split interval when determining cache keys for QueryRequest.
- Interval time.Duration
+ codec Codec
+ // interval is a constant split interval when determining cache keys for QueryRequest.
+ interval time.Duration
}
-// QueryRequest generates a cache key based on the userID, Request and interval.
-func (t DefaultCacheKeyGenerator) QueryRequest(_ context.Context, userID string, r Request) string {
- startInterval := r.GetStart() / t.Interval.Milliseconds()
+func NewDefaultCacheKeyGenerator(codec Codec, interval time.Duration) DefaultCacheKeyGenerator {
+ return DefaultCacheKeyGenerator{
+ codec: codec,
+ interval: interval,
+ }
+}
+
+// QueryRequest generates a cache key based on the userID, MetricsQueryRequest and interval.
+func (g DefaultCacheKeyGenerator) QueryRequest(_ context.Context, userID string, r MetricsQueryRequest) string {
+ startInterval := r.GetStart() / g.interval.Milliseconds()
stepOffset := r.GetStart() % r.GetStep()
// Use original format for step-aligned request, so that we can use existing cached results for such requests.
@@ -222,13 +230,13 @@ func (t DefaultCacheKeyGenerator) QueryRequest(_ context.Context, userID string,
// shouldCacheFn checks whether the current request should go to cache
// or not. If not, just send the request to next handler.
-type shouldCacheFn func(r Request) bool
+type shouldCacheFn func(r MetricsQueryRequest) bool
// resultsCacheAlwaysEnabled is a shouldCacheFn function always returning true.
-var resultsCacheAlwaysEnabled = func(_ Request) bool { return true }
+var resultsCacheAlwaysEnabled = func(_ MetricsQueryRequest) bool { return true }
// isRequestCachable says whether the request is eligible for caching.
-func isRequestCachable(req Request, maxCacheTime int64, cacheUnalignedRequests bool, logger log.Logger) (cachable bool, reason string) {
+func isRequestCachable(req MetricsQueryRequest, maxCacheTime int64, cacheUnalignedRequests bool, logger log.Logger) (cachable bool, reason string) {
// We can run with step alignment disabled because Grafana does it already. Mimir automatically aligning start and end is not
// PromQL compatible. But this means we cannot cache queries that do not have their start and end aligned.
if !cacheUnalignedRequests && !isRequestStepAligned(req) {
@@ -266,7 +274,7 @@ var (
)
// areEvaluationTimeModifiersCachable returns true if the @ modifier and the offset modifier results are safe to cache.
-func areEvaluationTimeModifiersCachable(r Request, maxCacheTime int64, logger log.Logger) bool {
+func areEvaluationTimeModifiersCachable(r MetricsQueryRequest, maxCacheTime int64, logger log.Logger) bool {
// There are 3 cases when evaluation time modifiers are not safe to cache:
// 1. When @ modifier points to time beyond the maxCacheTime.
// 2. If the @ modifier time is > the query range end while being
@@ -329,7 +337,7 @@ func getHeaderValuesWithName(r Response, headerName string) (headerValues []stri
// mergeCacheExtentsForRequest merges the provided cache extents for the input request and returns merged extents.
// The input extents can be overlapping and are not required to be sorted.
-func mergeCacheExtentsForRequest(ctx context.Context, r Request, merger Merger, extents []Extent) ([]Extent, error) {
+func mergeCacheExtentsForRequest(ctx context.Context, r MetricsQueryRequest, merger Merger, extents []Extent) ([]Extent, error) {
// Fast path.
if len(extents) <= 1 {
return extents, nil
@@ -424,7 +432,7 @@ func newAccumulator(base Extent) (*accumulator, error) {
}, nil
}
-func toExtent(ctx context.Context, req Request, res Response, queryTime time.Time) (Extent, error) {
+func toExtent(ctx context.Context, req MetricsQueryRequest, res Response, queryTime time.Time) (Extent, error) {
marshalled, err := types.MarshalAny(res)
if err != nil {
return Extent{}, err
@@ -440,8 +448,8 @@ func toExtent(ctx context.Context, req Request, res Response, queryTime time.Tim
// partitionCacheExtents calculates the required requests to satisfy req given the cached data.
// extents must be in order by start time.
-func partitionCacheExtents(req Request, extents []Extent, minCacheExtent int64, extractor Extractor) ([]Request, []Response, error) {
- var requests []Request
+func partitionCacheExtents(req MetricsQueryRequest, extents []Extent, minCacheExtent int64, extractor Extractor) ([]MetricsQueryRequest, []Response, error) {
+ var requests []MetricsQueryRequest
var cachedResponses []Response
start := req.GetStart()
@@ -504,7 +512,7 @@ func partitionCacheExtents(req Request, extents []Extent, minCacheExtent int64,
return requests, cachedResponses, nil
}
-func filterRecentCacheExtents(req Request, maxCacheFreshness time.Duration, extractor Extractor, extents []Extent) ([]Extent, error) {
+func filterRecentCacheExtents(req MetricsQueryRequest, maxCacheFreshness time.Duration, extractor Extractor, extents []Extent) ([]Extent, error) {
maxCacheTime := (int64(model.Now().Add(-maxCacheFreshness)) / req.GetStep()) * req.GetStep()
for i := range extents {
// Never cache data for the latest freshness period.
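The split-interval bucketing in DefaultCacheKeyGenerator.QueryRequest can be sanity-checked with a small standalone sketch; the values are hypothetical and the exact key string layout (elided in the hunk above) is not reproduced here, only the arithmetic:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		interval := 24 * time.Hour
		start := int64(1_700_000_000_000) // request start in Unix milliseconds
		step := int64(60_000)             // 60s step

		startInterval := start / interval.Milliseconds() // requests starting within the same split interval share this bucket
		stepOffset := start % step                       // 0 for step-aligned requests, so existing cached entries keep matching

		fmt.Println(startInterval, stepOffset) // 19675 20000
	}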
diff --git a/pkg/frontend/querymiddleware/results_cache_test.go b/pkg/frontend/querymiddleware/results_cache_test.go
index 0dd86dfd975..ecb7b005c08 100644
--- a/pkg/frontend/querymiddleware/results_cache_test.go
+++ b/pkg/frontend/querymiddleware/results_cache_test.go
@@ -15,6 +15,7 @@ import (
"github.com/gogo/protobuf/types"
"github.com/grafana/dskit/cache"
"github.com/grafana/dskit/flagext"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -129,7 +130,7 @@ func TestIsRequestCachable(t *testing.T) {
for _, tc := range []struct {
name string
- request Request
+ request MetricsQueryRequest
expected bool
expectedNotCachableReason string
cacheStepUnaligned bool
@@ -372,9 +373,9 @@ func TestIsResponseCachable(t *testing.T) {
func TestPartitionCacheExtents(t *testing.T) {
for _, tc := range []struct {
name string
- input Request
+ input MetricsQueryRequest
prevCachedResponse []Extent
- expectedRequests []Request
+ expectedRequests []MetricsQueryRequest
expectedCachedResponse []Response
}{
{
@@ -402,7 +403,7 @@ func TestPartitionCacheExtents(t *testing.T) {
prevCachedResponse: []Extent{
mkExtent(110, 210),
},
- expectedRequests: []Request{
+ expectedRequests: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{
Start: 0,
End: 100,
@@ -420,7 +421,7 @@ func TestPartitionCacheExtents(t *testing.T) {
prevCachedResponse: []Extent{
mkExtent(50, 100),
},
- expectedRequests: []Request{
+ expectedRequests: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{
Start: 0,
End: 50,
@@ -442,7 +443,7 @@ func TestPartitionCacheExtents(t *testing.T) {
mkExtent(50, 120),
mkExtent(160, 250),
},
- expectedRequests: []Request{
+ expectedRequests: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{
Start: 120,
End: 160,
@@ -465,7 +466,7 @@ func TestPartitionCacheExtents(t *testing.T) {
mkExtent(50, 120),
mkExtent(122, 130),
},
- expectedRequests: []Request{
+ expectedRequests: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{
Start: 120,
End: 160,
@@ -486,7 +487,7 @@ func TestPartitionCacheExtents(t *testing.T) {
prevCachedResponse: []Extent{
mkExtent(50, 90),
},
- expectedRequests: []Request{
+ expectedRequests: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{
Start: 100,
End: 100,
@@ -525,7 +526,7 @@ func TestPartitionCacheExtents(t *testing.T) {
expectedCachedResponse: []Response{
mkAPIResponse(486, 625, 33),
},
- expectedRequests: []Request{
+ expectedRequests: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: 123, End: 486, Step: 33},
&PrometheusRangeQueryRequest{
Start: 651, // next number after 625 (end of extent) such that it is equal to input.Start + N * input.Step.
@@ -554,11 +555,14 @@ func TestPartitionCacheExtents(t *testing.T) {
func TestDefaultSplitter_QueryRequest(t *testing.T) {
t.Parallel()
+ reg := prometheus.NewPedanticRegistry()
+ codec := NewPrometheusCodec(reg, formatJSON)
+
ctx := context.Background()
tests := []struct {
name string
- r Request
+ r MetricsQueryRequest
interval time.Duration
want string
}{
@@ -575,7 +579,7 @@ func TestDefaultSplitter_QueryRequest(t *testing.T) {
}
for _, tt := range tests {
t.Run(fmt.Sprintf("%s - %s", tt.name, tt.interval), func(t *testing.T) {
- if got := (DefaultCacheKeyGenerator{tt.interval}).QueryRequest(ctx, "fake", tt.r); got != tt.want {
+ if got := (DefaultCacheKeyGenerator{codec: codec, interval: tt.interval}).QueryRequest(ctx, "fake", tt.r); got != tt.want {
t.Errorf("generateKey() = %v, want %v", got, tt.want)
}
})
diff --git a/pkg/frontend/querymiddleware/retry.go b/pkg/frontend/querymiddleware/retry.go
index 180ba683460..5a75cff1b09 100644
--- a/pkg/frontend/querymiddleware/retry.go
+++ b/pkg/frontend/querymiddleware/retry.go
@@ -41,7 +41,7 @@ func (m *retryMiddlewareMetrics) Observe(v float64) {
type retry struct {
log log.Logger
- next Handler
+ next MetricsQueryHandler
maxRetries int
metrics prometheus.Observer
@@ -49,12 +49,12 @@ type retry struct {
// newRetryMiddleware returns a middleware that retries requests if they
// fail with 500 or a non-HTTP error.
-func newRetryMiddleware(log log.Logger, maxRetries int, metrics prometheus.Observer) Middleware {
+func newRetryMiddleware(log log.Logger, maxRetries int, metrics prometheus.Observer) MetricsQueryMiddleware {
if metrics == nil {
metrics = newRetryMiddlewareMetrics(nil)
}
- return MiddlewareFunc(func(next Handler) Handler {
+ return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
return retry{
log: log,
next: next,
@@ -64,7 +64,7 @@ func newRetryMiddleware(log log.Logger, maxRetries int, metrics prometheus.Obser
})
}
-func (r retry) Do(ctx context.Context, req Request) (Response, error) {
+func (r retry) Do(ctx context.Context, req MetricsQueryRequest) (Response, error) {
tries := 0
defer func() { r.metrics.Observe(float64(tries)) }()
diff --git a/pkg/frontend/querymiddleware/retry_test.go b/pkg/frontend/querymiddleware/retry_test.go
index e0d75c6bd58..af2e9b11846 100644
--- a/pkg/frontend/querymiddleware/retry_test.go
+++ b/pkg/frontend/querymiddleware/retry_test.go
@@ -35,7 +35,7 @@ func TestRetry(t *testing.T) {
for _, tc := range []struct {
name string
- handler Handler
+ handler MetricsQueryHandler
resp Response
err error
expectedRetries int
@@ -43,7 +43,7 @@ func TestRetry(t *testing.T) {
{
name: "retry failures",
expectedRetries: 4,
- handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
+ handler: HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) {
if try.Inc() == 5 {
return &PrometheusResponse{Status: "Hello World"}, nil
}
@@ -54,7 +54,7 @@ func TestRetry(t *testing.T) {
{
name: "don't retry 400s",
expectedRetries: 0,
- handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
+ handler: HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) {
return nil, errBadRequest
}),
err: errBadRequest,
@@ -62,7 +62,7 @@ func TestRetry(t *testing.T) {
{
name: "don't retry bad-data",
expectedRetries: 0,
- handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
+ handler: HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) {
return nil, errUnprocessable
}),
err: errUnprocessable,
@@ -70,7 +70,7 @@ func TestRetry(t *testing.T) {
{
name: "retry 500s",
expectedRetries: 5,
- handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
+ handler: HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) {
return nil, errInternal
}),
err: errInternal,
@@ -78,7 +78,7 @@ func TestRetry(t *testing.T) {
{
name: "last error",
expectedRetries: 4,
- handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
+ handler: HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) {
if try.Inc() == 5 {
return nil, errBadRequest
}
@@ -112,7 +112,7 @@ func Test_RetryMiddlewareCancel(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
_, err := newRetryMiddleware(log.NewNopLogger(), 5, nil).Wrap(
- HandlerFunc(func(c context.Context, r Request) (Response, error) {
+ HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) {
try.Inc()
return nil, ctx.Err()
}),
@@ -122,7 +122,7 @@ func Test_RetryMiddlewareCancel(t *testing.T) {
ctx, cancel = context.WithCancel(context.Background())
_, err = newRetryMiddleware(log.NewNopLogger(), 5, nil).Wrap(
- HandlerFunc(func(c context.Context, r Request) (Response, error) {
+ HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) {
try.Inc()
cancel()
return nil, errors.New("failed")
diff --git a/pkg/frontend/querymiddleware/roundtrip.go b/pkg/frontend/querymiddleware/roundtrip.go
index e68c95b898b..293b2c3e2c8 100644
--- a/pkg/frontend/querymiddleware/roundtrip.go
+++ b/pkg/frontend/querymiddleware/roundtrip.go
@@ -63,6 +63,7 @@ type Config struct {
ShardedQueries bool `yaml:"parallelize_shardable_queries"`
TargetSeriesPerShard uint64 `yaml:"query_sharding_target_series_per_shard" category:"advanced"`
ShardActiveSeriesQueries bool `yaml:"shard_active_series_queries" category:"experimental"`
+ UseActiveSeriesDecoder bool `yaml:"use_active_series_decoder" category:"experimental"`
// CacheKeyGenerator allows to inject a CacheKeyGenerator to use for generating cache keys.
// If nil, the querymiddleware package uses a DefaultCacheKeyGenerator with SplitQueriesByInterval.
@@ -81,6 +82,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.Uint64Var(&cfg.TargetSeriesPerShard, "query-frontend.query-sharding-target-series-per-shard", 0, "How many series a single sharded partial query should load at most. This is not a strict requirement guaranteed to be honoured by query sharding, but a hint given to the query sharding when the query execution is initially planned. 0 to disable cardinality-based hints.")
f.StringVar(&cfg.QueryResultResponseFormat, "query-frontend.query-result-response-format", formatProtobuf, fmt.Sprintf("Format to use when retrieving query results from queriers. Supported values: %s", strings.Join(allFormats, ", ")))
f.BoolVar(&cfg.ShardActiveSeriesQueries, "query-frontend.shard-active-series-queries", false, "True to enable sharding of active series queries.")
+ f.BoolVar(&cfg.UseActiveSeriesDecoder, "query-frontend.use-active-series-decoder", false, "Set to true to use the zero-allocation response decoder for active series queries.")
cfg.ResultsCacheConfig.RegisterFlags(f)
// The query-frontend.align-queries-with-step flag has been moved to the limits.go file
@@ -115,36 +117,36 @@ func (cfg *Config) cardinalityBasedShardingEnabled() bool {
return cfg.TargetSeriesPerShard > 0
}
-// HandlerFunc is like http.HandlerFunc, but for Handler.
-type HandlerFunc func(context.Context, Request) (Response, error)
+// HandlerFunc is like http.HandlerFunc, but for MetricsQueryHandler.
+type HandlerFunc func(context.Context, MetricsQueryRequest) (Response, error)
-// Do implements Handler.
-func (q HandlerFunc) Do(ctx context.Context, req Request) (Response, error) {
+// Do implements MetricsQueryHandler.
+func (q HandlerFunc) Do(ctx context.Context, req MetricsQueryRequest) (Response, error) {
return q(ctx, req)
}
-// Handler is like http.Handle, but specifically for Prometheus query_range calls.
-type Handler interface {
- Do(context.Context, Request) (Response, error)
+// MetricsQueryHandler is like http.Handle, but specifically for Prometheus query and query_range calls.
+type MetricsQueryHandler interface {
+ Do(context.Context, MetricsQueryRequest) (Response, error)
}
-// MiddlewareFunc is like http.HandlerFunc, but for Middleware.
-type MiddlewareFunc func(Handler) Handler
+// MetricsQueryMiddlewareFunc is like http.HandlerFunc, but for MetricsQueryMiddleware.
+type MetricsQueryMiddlewareFunc func(MetricsQueryHandler) MetricsQueryHandler
-// Wrap implements Middleware.
-func (q MiddlewareFunc) Wrap(h Handler) Handler {
+// Wrap implements MetricsQueryMiddleware.
+func (q MetricsQueryMiddlewareFunc) Wrap(h MetricsQueryHandler) MetricsQueryHandler {
return q(h)
}
-// Middleware is a higher order Handler.
-type Middleware interface {
- Wrap(Handler) Handler
+// MetricsQueryMiddleware is a higher order MetricsQueryHandler.
+type MetricsQueryMiddleware interface {
+ Wrap(MetricsQueryHandler) MetricsQueryHandler
}
-// MergeMiddlewares produces a middleware that applies multiple middleware in turn;
+// MergeMetricsQueryMiddlewares produces a middleware that applies multiple middleware in turn;
// ie Merge(f,g,h).Wrap(handler) == f.Wrap(g.Wrap(h.Wrap(handler)))
-func MergeMiddlewares(middleware ...Middleware) Middleware {
- return MiddlewareFunc(func(next Handler) Handler {
+func MergeMetricsQueryMiddlewares(middleware ...MetricsQueryMiddleware) MetricsQueryMiddleware {
+ return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
for i := len(middleware) - 1; i >= 0; i-- {
next = middleware[i].Wrap(next)
}
@@ -217,7 +219,7 @@ func newQueryTripperware(
queryBlockerMiddleware := newQueryBlockerMiddleware(limits, log, registerer)
queryStatsMiddleware := newQueryStatsMiddleware(registerer, engine)
- queryRangeMiddleware := []Middleware{
+ queryRangeMiddleware := []MetricsQueryMiddleware{
// Track query range statistics. Added first before any subsequent middleware modifies the request.
queryStatsMiddleware,
newLimitsMiddleware(limits, log),
@@ -239,12 +241,12 @@ func newQueryTripperware(
cacheKeyGenerator := cfg.CacheKeyGenerator
if cacheKeyGenerator == nil {
- cacheKeyGenerator = DefaultCacheKeyGenerator{Interval: cfg.SplitQueriesByInterval}
+ cacheKeyGenerator = NewDefaultCacheKeyGenerator(codec, cfg.SplitQueriesByInterval)
}
// Inject the middleware to split requests by interval + results cache (if at least one of the two is enabled).
if cfg.SplitQueriesByInterval > 0 || cfg.CacheResults {
- shouldCache := func(r Request) bool {
+ shouldCache := func(r MetricsQueryRequest) bool {
return !r.GetOptions().CacheDisabled
}
@@ -263,7 +265,7 @@ func newQueryTripperware(
))
}
- queryInstantMiddleware := []Middleware{
+ queryInstantMiddleware := []MetricsQueryMiddleware{
// Track query range statistics. Added first before any subsequent middleware modifies the request.
queryStatsMiddleware,
newLimitsMiddleware(limits, log),
@@ -334,7 +336,7 @@ func newQueryTripperware(
}
if cfg.ShardActiveSeriesQueries {
- activeSeries = newShardActiveSeriesMiddleware(activeSeries, limits, log)
+ activeSeries = newShardActiveSeriesMiddleware(activeSeries, cfg.UseActiveSeriesDecoder, limits, log)
}
return RoundTripFunc(func(r *http.Request) (*http.Response, error) {
@@ -431,8 +433,16 @@ func IsCardinalityQuery(path string) bool {
strings.HasSuffix(path, cardinalityLabelValuesPathSuffix)
}
+func IsLabelNamesQuery(path string) bool {
+ return strings.HasSuffix(path, labelNamesPathSuffix)
+}
+
+func IsLabelValuesQuery(path string) bool {
+ return labelValuesPathSuffix.MatchString(path)
+}
+
func IsLabelsQuery(path string) bool {
- return strings.HasSuffix(path, labelNamesPathSuffix) || labelValuesPathSuffix.MatchString(path)
+ return IsLabelNamesQuery(path) || IsLabelValuesQuery(path)
}
func IsActiveSeriesQuery(path string) bool {
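The doc comment on MergeMetricsQueryMiddlewares pins down the nesting order: Merge(f, g, h).Wrap(handler) == f.Wrap(g.Wrap(h.Wrap(handler))), so f is the outermost wrapper and sees the request first. A minimal in-package sketch (namedMiddleware is a hypothetical helper, not part of this change) that makes the resulting execution order observable:

	// namedMiddleware records the order in which each wrapper sees the request.
	func namedMiddleware(name string, order *[]string) MetricsQueryMiddleware {
		return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
			return HandlerFunc(func(ctx context.Context, req MetricsQueryRequest) (Response, error) {
				*order = append(*order, name) // the outermost wrapper appends first
				return next.Do(ctx, req)
			})
		})
	}

	// var order []string
	// handler := MergeMetricsQueryMiddlewares(
	//     namedMiddleware("f", &order),
	//     namedMiddleware("g", &order),
	//     namedMiddleware("h", &order),
	// ).Wrap(downstream)
	// _, _ = handler.Do(ctx, req) // order == ["f", "g", "h"]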
diff --git a/pkg/frontend/querymiddleware/roundtrip_test.go b/pkg/frontend/querymiddleware/roundtrip_test.go
index dbea50cf822..dc4fdcb81c4 100644
--- a/pkg/frontend/querymiddleware/roundtrip_test.go
+++ b/pkg/frontend/querymiddleware/roundtrip_test.go
@@ -367,7 +367,7 @@ func TestTripperware_Metrics(t *testing.T) {
s := httptest.NewServer(
middleware.AuthenticateUser.Wrap(
- http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", jsonMimeType)
_, err := w.Write([]byte("{}"))
require.NoError(t, err)
diff --git a/pkg/frontend/querymiddleware/running_test.go b/pkg/frontend/querymiddleware/running_test.go
index cce833240bc..0d11f03e729 100644
--- a/pkg/frontend/querymiddleware/running_test.go
+++ b/pkg/frontend/querymiddleware/running_test.go
@@ -28,7 +28,7 @@ func TestAwaitQueryFrontendServiceRunning_ServiceIsReady(t *testing.T) {
func TestAwaitQueryFrontendServiceRunning_ServiceIsNotReadyWaitDisabled(t *testing.T) {
startChan := make(chan struct{})
- start := func(ctx context.Context) error {
+ start := func(context.Context) error {
<-startChan
return nil
}
@@ -45,7 +45,7 @@ func TestAwaitQueryFrontendServiceRunning_ServiceIsNotReadyWaitDisabled(t *testi
func TestAwaitQueryFrontendServiceRunning_ServiceIsNotReadyInitially(t *testing.T) {
startChan := make(chan struct{})
- start := func(ctx context.Context) error {
+ start := func(context.Context) error {
<-startChan
return nil
}
@@ -69,7 +69,7 @@ func TestAwaitQueryFrontendServiceRunning_ServiceIsNotReadyInitially(t *testing.
func TestAwaitQueryFrontendServiceRunning_ServiceIsNotReadyAfterTimeout(t *testing.T) {
serviceChan := make(chan struct{})
- start := func(ctx context.Context) error {
+ start := func(context.Context) error {
<-serviceChan
return nil
}
diff --git a/pkg/frontend/querymiddleware/shard_active_series.go b/pkg/frontend/querymiddleware/shard_active_series.go
index 32787875554..54fc791411b 100644
--- a/pkg/frontend/querymiddleware/shard_active_series.go
+++ b/pkg/frontend/querymiddleware/shard_active_series.go
@@ -3,6 +3,7 @@
package querymiddleware
import (
+ "bytes"
"context"
"fmt"
"io"
@@ -11,6 +12,7 @@ import (
"os"
"strconv"
"sync"
+ "unsafe"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@@ -65,16 +67,18 @@ func getSnappyWriter(w io.Writer) *s2.Writer {
}
type shardActiveSeriesMiddleware struct {
- upstream http.RoundTripper
- limits Limits
- logger log.Logger
+ upstream http.RoundTripper
+ useZeroAllocationDecoder bool
+ limits Limits
+ logger log.Logger
}
-func newShardActiveSeriesMiddleware(upstream http.RoundTripper, limits Limits, logger log.Logger) http.RoundTripper {
+func newShardActiveSeriesMiddleware(upstream http.RoundTripper, useZeroAllocationDecoder bool, limits Limits, logger log.Logger) http.RoundTripper {
return &shardActiveSeriesMiddleware{
- upstream: upstream,
- limits: limits,
- logger: logger,
+ upstream: upstream,
+ useZeroAllocationDecoder: useZeroAllocationDecoder,
+ limits: limits,
+ logger: logger,
}
}
@@ -124,8 +128,12 @@ func (s *shardActiveSeriesMiddleware) RoundTrip(r *http.Request) (*http.Response
}
return nil, apierror.New(apierror.TypeInternal, err.Error())
}
+ acceptEncoding := r.Header.Get("Accept-Encoding")
- return s.mergeResponses(ctx, resp, r.Header.Get("Accept-Encoding")), nil
+ if s.useZeroAllocationDecoder {
+ return s.mergeResponsesWithZeroAllocationDecoder(ctx, resp, acceptEncoding), nil
+ }
+ return s.mergeResponses(ctx, resp, acceptEncoding), nil
}
func setShardCountFromHeader(origShardCount int, r *http.Request, spanLog *spanlogger.SpanLogger) int {
@@ -356,6 +364,48 @@ func (s *shardActiveSeriesMiddleware) mergeResponses(ctx context.Context, respon
return resp
}
+func (s *shardActiveSeriesMiddleware) mergeResponsesWithZeroAllocationDecoder(ctx context.Context, responses []*http.Response, encoding string) *http.Response {
+ reader, writer := io.Pipe()
+
+ streamCh := make(chan *bytes.Buffer)
+
+ g, gCtx := errgroup.WithContext(ctx)
+ for _, res := range responses {
+ if res == nil {
+ continue
+ }
+ r := res
+ g.Go(func() error {
+ dec := borrowShardActiveSeriesResponseDecoder(gCtx, r.Body, streamCh)
+ defer func() {
+ dec.close()
+ reuseShardActiveSeriesResponseDecoder(dec)
+ }()
+
+ if err := dec.decode(); err != nil {
+ return err
+ }
+ return dec.streamData()
+ })
+ }
+
+ go func() {
+ // We ignore the error from the errgroup because it will be checked again later.
+ _ = g.Wait()
+ close(streamCh)
+ }()
+
+ resp := &http.Response{Body: reader, StatusCode: http.StatusOK, Header: http.Header{}}
+ resp.Header.Set("Content-Type", "application/json")
+ if encoding == encodingTypeSnappyFramed {
+ resp.Header.Set("Content-Encoding", encodingTypeSnappyFramed)
+ }
+
+ go s.writeMergedResponseWithZeroAllocationDecoder(gCtx, g.Wait, writer, streamCh, encoding)
+
+ return resp
+}
+
func (s *shardActiveSeriesMiddleware) writeMergedResponse(ctx context.Context, check func() error, w io.WriteCloser, items <-chan *labels.Builder, encoding string) {
defer w.Close()
@@ -434,3 +484,74 @@ func (s *shardActiveSeriesMiddleware) writeMergedResponse(ctx context.Context, c
stream.WriteObjectEnd()
}
+
+func (s *shardActiveSeriesMiddleware) writeMergedResponseWithZeroAllocationDecoder(ctx context.Context, check func() error, w io.WriteCloser, streamCh chan *bytes.Buffer, encoding string) {
+ defer w.Close()
+
+ span, _ := opentracing.StartSpanFromContext(ctx, "shardActiveSeries.writeMergedResponseWithZeroAllocationDecoder")
+ defer span.Finish()
+
+ var out io.Writer = w
+ if encoding == encodingTypeSnappyFramed {
+ span.LogFields(otlog.String("encoding", encodingTypeSnappyFramed))
+ enc := getSnappyWriter(w)
+ out = enc
+ defer func() {
+ enc.Close()
+ // Reset the encoder before putting it back in the pool so it does not keep a reference to the writer.
+ enc.Reset(nil)
+ snappyWriterPool.Put(enc)
+ }()
+ } else {
+ span.LogFields(otlog.String("encoding", "none"))
+ }
+
+ stream := jsoniter.ConfigFastest.BorrowStream(out)
+ defer func(stream *jsoniter.Stream) {
+ _ = stream.Flush()
+
+ if cap(stream.Buffer()) > jsoniterMaxBufferSize {
+ return
+ }
+ jsoniter.ConfigFastest.ReturnStream(stream)
+ }(stream)
+
+ stream.WriteObjectStart()
+ stream.WriteObjectField("data")
+ stream.WriteArrayStart()
+
+ firstItem := true
+ for streamBuf := range streamCh {
+ if firstItem {
+ firstItem = false
+ } else {
+ stream.WriteMore()
+ }
+ rawStr := unsafe.String(unsafe.SliceData(streamBuf.Bytes()), streamBuf.Len())
+
+ // Write the value as is, since it's already a JSON array.
+ stream.WriteRaw(rawStr)
+
+ // Flush the stream buffer if it's getting too large.
+ if stream.Buffered() > jsoniterMaxBufferSize {
+ _ = stream.Flush()
+ }
+
+ // Reuse stream buffer.
+ reuseActiveSeriesDataStreamBuffer(streamBuf)
+ }
+ stream.WriteArrayEnd()
+
+ if err := check(); err != nil {
+ level.Error(s.logger).Log("msg", "error merging partial responses", "err", err.Error())
+ span.LogFields(otlog.Error(err))
+ stream.WriteMore()
+ stream.WriteObjectField("status")
+ stream.WriteString("error")
+ stream.WriteMore()
+ stream.WriteObjectField("error")
+ stream.WriteString(fmt.Sprintf("error merging partial responses: %s", err.Error()))
+ }
+
+ stream.WriteObjectEnd()
+}
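mergeResponsesWithZeroAllocationDecoder fans out one decoder goroutine per partial response, hands chunks of already-validated JSON to the writer goroutine over streamCh, and recycles the chunk buffers through a sync.Pool. A self-contained sketch of that producer/consumer shape (just the pattern, not the Mimir code itself):

	package main

	import (
		"bytes"
		"fmt"
		"sync"
	)

	var bufPool = sync.Pool{New: func() any { return bytes.NewBuffer(make([]byte, 0, 1024)) }}

	func main() {
		ch := make(chan *bytes.Buffer)
		var wg sync.WaitGroup
		for i := 0; i < 3; i++ {
			wg.Add(1)
			go func(shard int) { // one producer per partial response
				defer wg.Done()
				buf := bufPool.Get().(*bytes.Buffer)
				fmt.Fprintf(buf, `{"shard":%d}`, shard)
				ch <- buf
			}(i)
		}
		go func() { wg.Wait(); close(ch) }() // close once all producers are done
		for buf := range ch {                // single consumer: write the chunk, then recycle the buffer
			fmt.Println(buf.String())
			buf.Reset()
			bufPool.Put(buf)
		}
	}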
diff --git a/pkg/frontend/querymiddleware/shard_active_series_response_decoder.go b/pkg/frontend/querymiddleware/shard_active_series_response_decoder.go
new file mode 100644
index 00000000000..53d175ad606
--- /dev/null
+++ b/pkg/frontend/querymiddleware/shard_active_series_response_decoder.go
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+//
+// Portions of this file are derived from json-iterator/go (https://github.com/json-iterator/go),
+// which is licensed under the MIT License. The specific functions derived from this source are
+// noted below in this file.
+
+package querymiddleware
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+ "unicode/utf16"
+ "unsafe"
+)
+
+const (
+ activeSeriesChunkMaxBufferSize = 1024 * 1024 // 1MB
+
+ checkContextCancelledBytesInterval = 256
+)
+
+var activeSeriesChunkBufferPool = sync.Pool{
+ New: func() any {
+ return bytes.NewBuffer(make([]byte, 0, activeSeriesChunkMaxBufferSize))
+ },
+}
+
+var shardActiveSeriesResponseDecoderPool = sync.Pool{
+ New: func() any {
+ return &shardActiveSeriesResponseDecoder{
+ br: bufio.NewReaderSize(nil, 4096),
+ strBuff: make([]byte, 0, 256),
+ }
+ },
+}
+
+func borrowShardActiveSeriesResponseDecoder(ctx context.Context, rc io.ReadCloser, streamCh chan<- *bytes.Buffer) *shardActiveSeriesResponseDecoder {
+ d := shardActiveSeriesResponseDecoderPool.Get().(*shardActiveSeriesResponseDecoder)
+ d.reset(ctx, rc, streamCh)
+ return d
+}
+
+func reuseShardActiveSeriesResponseDecoder(d *shardActiveSeriesResponseDecoder) {
+ d.reset(context.Background(), nil, nil)
+ shardActiveSeriesResponseDecoderPool.Put(d)
+}
+
+func reuseActiveSeriesDataStreamBuffer(buf *bytes.Buffer) {
+ buf.Reset()
+ activeSeriesChunkBufferPool.Put(buf)
+}
+
+type shardActiveSeriesResponseDecoder struct {
+ ctx context.Context
+ rc io.ReadCloser
+ br *bufio.Reader
+ strBuff []byte
+ streamCh chan<- *bytes.Buffer
+ readBytesCount int
+ err error
+}
+
+func (d *shardActiveSeriesResponseDecoder) reset(ctx context.Context, rc io.ReadCloser, streamCh chan<- *bytes.Buffer) {
+ d.br.Reset(rc)
+
+ d.ctx = ctx
+ d.rc = rc
+ d.streamCh = streamCh
+ d.strBuff = d.strBuff[:0]
+ d.readBytesCount = 0
+ d.err = nil
+}
+
+func (d *shardActiveSeriesResponseDecoder) stickError(err error) {
+ if d.err != nil {
+ return
+ }
+ d.err = err
+}
+
+func (d *shardActiveSeriesResponseDecoder) close() {
+ _, _ = io.Copy(io.Discard, d.rc)
+ _ = d.rc.Close()
+}
+
+func (d *shardActiveSeriesResponseDecoder) decode() error {
+ c := d.nextToken()
+ if d.err != nil {
+ return d.err
+ }
+ switch c {
+ case '{':
+ for d.err == nil {
+ k := d.readString()
+ switch k {
+ case "data":
+ d.readData()
+ if d.err != nil {
+ return d.err
+ }
+ return nil
+
+ case "error":
+ d.readError()
+ return d.err
+
+ default:
+ d.skipValue()
+ }
+
+ c = d.nextToken()
+ if c == ',' {
+ continue
+ } else if c == '}' {
+ break
+ }
+ }
+
+ default:
+ return fmt.Errorf("decode: expected '{', found %c", c)
+ }
+ return errors.New("expected data field at top level")
+}
+
+func (d *shardActiveSeriesResponseDecoder) readData() {
+ defer func() {
+ if err := d.ctx.Err(); err != nil {
+ d.stickError(err)
+ }
+ }()
+
+ if c := d.nextToken(); c != ':' {
+ d.stickError(fmt.Errorf("readData: expected ':', found %c", c))
+ return
+ }
+ switch d.nextToken() {
+ case '[':
+ return
+
+ default:
+ d.stickError(errors.New("expected data field to contain an array"))
+ }
+}
+
+func (d *shardActiveSeriesResponseDecoder) readError() {
+ defer func() {
+ if err := d.ctx.Err(); err != nil {
+ d.stickError(err)
+ }
+ }()
+
+ if c := d.nextToken(); c != ':' {
+ d.stickError(fmt.Errorf("readError: expected ':', found %c", c))
+ return
+ }
+ d.stickError(fmt.Errorf("error in partial response: %s", d.readString()))
+}
+
+func (d *shardActiveSeriesResponseDecoder) streamData() error {
+ firstItem := true
+ expectsComma := false
+
+ cb := activeSeriesChunkBufferPool.Get().(*bytes.Buffer)
+ for d.err == nil {
+ t := d.nextToken()
+ switch t {
+ case ']':
+ if cb.Len() > 0 {
+ d.streamCh <- cb
+ }
+ d.checkContextCanceled()
+ return d.err
+
+ case '{':
+ if !firstItem {
+ cb.WriteByte(',')
+ } else {
+ firstItem = false
+ }
+ cb.WriteByte(t)
+
+ d.readObject(cb)
+ if d.err != nil {
+ return d.err
+ }
+ expectsComma = true
+
+ case ',':
+ if expectsComma {
+ expectsComma = false
+ break
+ }
+ d.stickError(errors.New("streamData: unexpected comma"))
+ return d.err
+
+ default:
+ d.stickError(fmt.Errorf("streamData: expected '{' or ',', found %c", t))
+ return d.err
+ }
+
+ if cb.Len() >= activeSeriesChunkMaxBufferSize {
+ d.streamCh <- cb
+ cb = activeSeriesChunkBufferPool.Get().(*bytes.Buffer)
+ }
+ }
+ d.checkContextCanceled()
+ return d.err
+}
+
+func (d *shardActiveSeriesResponseDecoder) nextToken() byte {
+ for {
+ c := d.readByte()
+ if d.err != nil {
+ return 0
+ }
+ switch c {
+ case ' ', '\n', '\t', '\r':
+ continue
+ default:
+ return c
+ }
+ }
+}
+
+func (d *shardActiveSeriesResponseDecoder) readString() string {
+ c := d.nextToken()
+ if c != '"' {
+ d.stickError(fmt.Errorf(`readString: expected '"', found %c`, c))
+ return ""
+ }
+ d.strBuff = d.strBuff[:0]
+
+ for d.err == nil {
+ c = d.readByte()
+ if c == '"' {
+ return unsafe.String(
+ unsafe.SliceData(d.strBuff), len(d.strBuff),
+ )
+ }
+ if c == '\\' {
+ c = d.readByte()
+ d.strBuff = d.readEscapedChar(c, d.strBuff)
+ } else {
+ d.strBuff = append(d.strBuff, c)
+ }
+ }
+ d.stickError(errors.New("readString: unexpected end of input"))
+ return ""
+}
+
+func (d *shardActiveSeriesResponseDecoder) readObject(buf *bytes.Buffer) {
+ inner := 1
+ for d.err == nil {
+ c := d.readByte()
+ if buf != nil {
+ buf.WriteByte(c)
+ }
+ switch c {
+ case '{':
+ inner++
+ case '}':
+ inner--
+ if inner == 0 {
+ return
+ }
+ }
+ }
+}
+
+func (d *shardActiveSeriesResponseDecoder) skipValue() {
+ if tk := d.nextToken(); tk != ':' {
+ d.stickError(fmt.Errorf("skipValue: expected ':', found %c", tk))
+ return
+ }
+ switch d.nextToken() {
+ case '{': // object
+ d.skipObject()
+
+ case '[': // array
+ d.skipArray()
+
+ case '"': // string
+ d.skipString()
+
+ default: // number, true, false, null
+ c := d.nextToken()
+ for d.err == nil {
+ if c == ',' || c == '}' {
+ d.unreadByte()
+ break
+ }
+ c = d.nextToken()
+ }
+ }
+}
+
+func (d *shardActiveSeriesResponseDecoder) skipObject() {
+ d.readObject(nil)
+}
+
+func (d *shardActiveSeriesResponseDecoder) skipArray() {
+ inner := 1
+ for d.err == nil {
+ switch d.readByte() {
+ case '[':
+ inner++
+ case ']':
+ inner--
+ if inner == 0 {
+ return
+ }
+ }
+ }
+}
+
+func (d *shardActiveSeriesResponseDecoder) skipString() {
+ d.unreadByte()
+ _ = d.readString()
+ d.strBuff = d.strBuff[:0]
+}
+
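+// readByte reads a single byte from the underlying reader, recording any error and
+// periodically checking for context cancellation.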
+func (d *shardActiveSeriesResponseDecoder) readByte() byte {
+ b, err := d.br.ReadByte()
+ if err != nil {
+ d.stickError(err)
+ return 0
+ }
+	// Periodically check for context cancellation while reading.
+ d.readBytesCount++
+ if d.readBytesCount%checkContextCancelledBytesInterval == 0 {
+ d.checkContextCanceled()
+ }
+ return b
+}
+
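+// unreadByte pushes the last read byte back onto the buffered reader.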
+func (d *shardActiveSeriesResponseDecoder) unreadByte() {
+ if err := d.br.UnreadByte(); err != nil {
+ d.stickError(err)
+ }
+}
+
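+// checkContextCanceled records a sticky error if the decoder's context has been canceled,
+// preferring the cancellation cause when one is set.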
+func (d *shardActiveSeriesResponseDecoder) checkContextCanceled() {
+ if err := d.ctx.Err(); err != nil {
+ if cause := context.Cause(d.ctx); cause != nil {
+ d.stickError(fmt.Errorf("context canceled: %w", cause))
+ return
+ }
+ d.stickError(err)
+ }
+}
+
+// Originally from json-iterator/go (https://github.com/json-iterator/go/blob/71ac16282d122fdd1e3a6d3e7f79b79b4cc3b50e/iter_str.go#L54)
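+// readEscapedChar appends the unescaped form of the escape sequence introduced by c to str,
+// handling \uXXXX sequences including UTF-16 surrogate pairs.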
+func (d *shardActiveSeriesResponseDecoder) readEscapedChar(c byte, str []byte) []byte {
+ switch c {
+ case 'u':
+ r := d.readU4()
+ if utf16.IsSurrogate(r) {
+ c = d.readByte()
+ if d.err != nil {
+ return nil
+ }
+ if c != '\\' {
+ d.unreadByte()
+ str = appendRune(str, r)
+ return str
+ }
+ c = d.readByte()
+ if d.err != nil {
+ return nil
+ }
+ if c != 'u' {
+ str = appendRune(str, r)
+ return d.readEscapedChar(c, str)
+ }
+ r2 := d.readU4()
+ if d.err != nil {
+ return nil
+ }
+ combined := utf16.DecodeRune(r, r2)
+ if combined == '\uFFFD' {
+ str = appendRune(str, r)
+ str = appendRune(str, r2)
+ } else {
+ str = appendRune(str, combined)
+ }
+ } else {
+ str = appendRune(str, r)
+ }
+ case '"':
+ str = append(str, '"')
+ case '\\':
+ str = append(str, '\\')
+ case '/':
+ str = append(str, '/')
+ case 'b':
+ str = append(str, '\b')
+ case 'f':
+ str = append(str, '\f')
+ case 'n':
+ str = append(str, '\n')
+ case 'r':
+ str = append(str, '\r')
+ case 't':
+ str = append(str, '\t')
+ default:
+ d.stickError(errors.New(`readEscapedChar: invalid escape char after \`))
+ return nil
+ }
+ return str
+}
+
+// Originally from json-iterator/go (https://github.com/json-iterator/go/blob/71ac16282d122fdd1e3a6d3e7f79b79b4cc3b50e/iter_str.go#L146)
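+// readU4 reads four hexadecimal digits and returns the code unit they encode.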
+func (d *shardActiveSeriesResponseDecoder) readU4() (ret rune) {
+ for i := 0; i < 4; i++ {
+ c := d.readByte()
+ if d.err != nil {
+ return
+ }
+ if c >= '0' && c <= '9' {
+ ret = ret*16 + rune(c-'0')
+ } else if c >= 'a' && c <= 'f' {
+ ret = ret*16 + rune(c-'a'+10)
+ } else if c >= 'A' && c <= 'F' {
+ ret = ret*16 + rune(c-'A'+10)
+ } else {
+ d.stickError(errors.New("readU4: expects 0~9 or a~f, but found " + string([]byte{c})))
+ return
+ }
+ }
+ return ret
+}
+
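+// UTF-8 encoding constants, matching those used by the standard library's unicode/utf8 package.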
+const (
+ t1 = 0x00 // 0000 0000
+ tx = 0x80 // 1000 0000
+ t2 = 0xC0 // 1100 0000
+ t3 = 0xE0 // 1110 0000
+ t4 = 0xF0 // 1111 0000
+ t5 = 0xF8 // 1111 1000
+
+ maskx = 0x3F // 0011 1111
+ mask2 = 0x1F // 0001 1111
+ mask3 = 0x0F // 0000 1111
+ mask4 = 0x07 // 0000 0111
+
+ rune1Max = 1<<7 - 1
+ rune2Max = 1<<11 - 1
+ rune3Max = 1<<16 - 1
+
+ surrogateMin = 0xD800
+ surrogateMax = 0xDFFF
+
+ maxRune = '\U0010FFFF' // Maximum valid Unicode code point.
+ runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character"
+)
+
+// Originally from json-iterator/go (https://github.com/json-iterator/go/blob/71ac16282d122fdd1e3a6d3e7f79b79b4cc3b50e/iter_str.go#L190)
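+// appendRune appends the UTF-8 encoding of r to p, substituting the Unicode replacement
+// character for surrogates and out-of-range code points.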
+func appendRune(p []byte, r rune) []byte {
+ // Negative values are erroneous. Making it unsigned addresses the problem.
+ switch i := uint32(r); {
+ case i <= rune1Max:
+ p = append(p, byte(r))
+ return p
+ case i <= rune2Max:
+ p = append(p, t2|byte(r>>6))
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ case i > maxRune, surrogateMin <= i && i <= surrogateMax:
+ r = runeError
+ fallthrough
+ case i <= rune3Max:
+ p = append(p, t3|byte(r>>12))
+ p = append(p, tx|byte(r>>6)&maskx)
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ default:
+ p = append(p, t4|byte(r>>18))
+ p = append(p, tx|byte(r>>12)&maskx)
+ p = append(p, tx|byte(r>>6)&maskx)
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ }
+}
diff --git a/pkg/frontend/querymiddleware/shard_active_series_response_decoder_test.go b/pkg/frontend/querymiddleware/shard_active_series_response_decoder_test.go
new file mode 100644
index 00000000000..7661e086814
--- /dev/null
+++ b/pkg/frontend/querymiddleware/shard_active_series_response_decoder_test.go
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+
+package querymiddleware
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestShardActiveSeriesResponseDecoder(t *testing.T) {
+ tcs := []struct {
+ name string
+ input string
+ expectedOutput string
+ expectedError string
+ }{
+ {
+ name: "empty response",
+ input: "",
+ expectedError: "EOF",
+ },
+ {
+ name: "empty data array",
+ input: `{"data":[]}`,
+ expectedOutput: "",
+ },
+ {
+ name: "skip object",
+ input: `{"unexpected_1":3.141516, "unexpected_2":"skip me", "unexpected_3":[[{}]], "unexpected_4": {"key":[]}, "unexpected_5":null, "unexpected_6":true, "data":[{"__name__":"metric","shard":"1"}]}`,
+ expectedOutput: `{"__name__":"metric","shard":"1"}`,
+ },
+ {
+ name: "multiple labels",
+ input: `{"data":[{"__name__":"metric","shard":"1"},{"__name__":"metric","shard":"2"}]}`,
+ expectedOutput: `{"__name__":"metric","shard":"1"},{"__name__":"metric","shard":"2"}`,
+ },
+ {
+ name: "unexpected comma",
+ input: `{"data":[{"__name__":"metric","shard":"1"},,,{"__name__":"metric","shard":"2"}`,
+ expectedError: "streamData: unexpected comma",
+ },
+ {
+ name: "unexpected end of input",
+ input: `{"data":[{"__name__":"metric","shard":"1"},{"__name__":"metric","shard":"2"}`,
+ expectedError: "EOF",
+ },
+ {
+ name: "error response",
+ input: `{"status":"error","error":"some error"}`,
+ expectedError: "error in partial response: some error",
+ },
+ {
+ name: "unicode escaped characters",
+ input: `{"error":"\u3053\u3093\u306B\u3061\u306F"}`,
+ expectedError: "error in partial response: こんにちは",
+ },
+ {
+ name: "wrong data type",
+ input: `{"data":3.141516}`,
+ expectedError: "expected data field to contain an array",
+ },
+ {
+ name: "missing 'data' and 'error' fields",
+ input: `{"unexpected":3.141516}`,
+ expectedError: "expected data field at top level",
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ var dataStr strings.Builder
+
+ errCh := make(chan error, 1)
+
+ streamCh := make(chan *bytes.Buffer)
+
+ r := strings.NewReader(tc.input)
+ d := borrowShardActiveSeriesResponseDecoder(context.Background(), io.NopCloser(r), streamCh)
+
+ err := d.decode()
+ if err == nil {
+ go func() {
+ errCh <- d.streamData()
+ close(streamCh)
+ }()
+
+ // Drain the data channel.
+ for streamBuf := range streamCh {
+ fmt.Println(streamBuf.String())
+ dataStr.WriteString(streamBuf.String())
+ }
+ } else {
+ errCh <- err
+ }
+
+ err = <-errCh
+
+ if len(tc.expectedError) > 0 {
+ require.Error(t, err)
+ require.EqualError(t, err, tc.expectedError)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tc.expectedOutput, dataStr.String())
+ }
+ })
+ }
+}
diff --git a/pkg/frontend/querymiddleware/shard_active_series_test.go b/pkg/frontend/querymiddleware/shard_active_series_test.go
index c80f38b064c..3cb7c21761a 100644
--- a/pkg/frontend/querymiddleware/shard_active_series_test.go
+++ b/pkg/frontend/querymiddleware/shard_active_series_test.go
@@ -31,6 +31,14 @@ import (
)
func Test_shardActiveSeriesMiddleware_RoundTrip(t *testing.T) {
+ for _, useZeroAllocationDecoder := range []bool{false, true} {
+ t.Run(fmt.Sprintf("useZeroAllocationDecoder=%t", useZeroAllocationDecoder), func(t *testing.T) {
+ runTestShardActiveSeriesMiddlewareRoundTrip(t, useZeroAllocationDecoder)
+ })
+ }
+}
+
+func runTestShardActiveSeriesMiddlewareRoundTrip(t *testing.T, useZeroAllocationDecoder bool) {
const tenantShardCount = 4
const tenantMaxShardCount = 128
@@ -311,6 +319,7 @@ func Test_shardActiveSeriesMiddleware_RoundTrip(t *testing.T) {
// Run the request through the middleware.
s := newShardActiveSeriesMiddleware(
upstream,
+ useZeroAllocationDecoder,
mockLimits{maxShardedQueries: tenantMaxShardCount, totalShards: tenantShardCount},
log.NewNopLogger(),
)
@@ -355,6 +364,14 @@ func Test_shardActiveSeriesMiddleware_RoundTrip(t *testing.T) {
}
func Test_shardActiveSeriesMiddleware_RoundTrip_concurrent(t *testing.T) {
+ for _, useZeroAllocationDecoder := range []bool{false, true} {
+ t.Run(fmt.Sprintf("useZeroAllocationDecoder=%t", useZeroAllocationDecoder), func(t *testing.T) {
+ runTestShardActiveSeriesMiddlewareRoundTripConcurrent(t, useZeroAllocationDecoder)
+ })
+ }
+}
+
+func runTestShardActiveSeriesMiddlewareRoundTripConcurrent(t *testing.T, useZeroAllocationDecoder bool) {
const shardCount = 4
upstream := RoundTripFunc(func(r *http.Request) (*http.Response, error) {
@@ -372,6 +389,7 @@ func Test_shardActiveSeriesMiddleware_RoundTrip_concurrent(t *testing.T) {
s := newShardActiveSeriesMiddleware(
upstream,
+ useZeroAllocationDecoder,
mockLimits{maxShardedQueries: shardCount, totalShards: shardCount},
log.NewNopLogger(),
)
@@ -423,7 +441,15 @@ func Test_shardActiveSeriesMiddleware_RoundTrip_concurrent(t *testing.T) {
}
func Test_shardActiveSeriesMiddleware_mergeResponse_contextCancellation(t *testing.T) {
- s := newShardActiveSeriesMiddleware(nil, mockLimits{}, log.NewNopLogger()).(*shardActiveSeriesMiddleware)
+ for _, useZeroAllocationDecoder := range []bool{false, true} {
+ t.Run(fmt.Sprintf("useZeroAllocationDecoder=%t", useZeroAllocationDecoder), func(t *testing.T) {
+ runTestShardActiveSeriesMiddlewareMergeResponseContextCancellation(t, useZeroAllocationDecoder)
+ })
+ }
+}
+
+func runTestShardActiveSeriesMiddlewareMergeResponseContextCancellation(t *testing.T, useZeroAllocationDecoder bool) {
+ s := newShardActiveSeriesMiddleware(nil, true, mockLimits{}, log.NewNopLogger()).(*shardActiveSeriesMiddleware)
ctx, cancel := context.WithCancelCause(context.Background())
defer cancel(fmt.Errorf("test ran to completion"))
@@ -439,12 +465,18 @@ func Test_shardActiveSeriesMiddleware_mergeResponse_contextCancellation(t *testi
{StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewReader(body))},
{StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewReader(body))},
}
+ var resp *http.Response
+
+ if useZeroAllocationDecoder {
+ resp = s.mergeResponsesWithZeroAllocationDecoder(ctx, responses, "")
+ } else {
+ defer func() {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }()
- resp := s.mergeResponses(ctx, responses, "")
- defer func() {
- _, _ = io.Copy(io.Discard, resp.Body)
- _ = resp.Body.Close()
- }()
+ resp = s.mergeResponses(ctx, responses, "")
+ }
var buf bytes.Buffer
_, err = io.CopyN(&buf, resp.Body, int64(os.Getpagesize()))
@@ -512,13 +544,13 @@ func benchmarkActiveSeriesMiddlewareMergeResponses(b *testing.B, encoding string
benchResponses[i] = responses
}
- s := newShardActiveSeriesMiddleware(nil, mockLimits{}, log.NewNopLogger()).(*shardActiveSeriesMiddleware)
+ s := newShardActiveSeriesMiddleware(nil, true, mockLimits{}, log.NewNopLogger()).(*shardActiveSeriesMiddleware)
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
- resp := s.mergeResponses(context.Background(), benchResponses[i], encoding)
+ resp := s.mergeResponsesWithZeroAllocationDecoder(context.Background(), benchResponses[i], encoding)
_, _ = io.Copy(io.Discard, resp.Body)
_ = resp.Body.Close()
diff --git a/pkg/frontend/querymiddleware/sharded_queryable.go b/pkg/frontend/querymiddleware/sharded_queryable.go
index 27988a289a7..63df1b67a29 100644
--- a/pkg/frontend/querymiddleware/sharded_queryable.go
+++ b/pkg/frontend/querymiddleware/sharded_queryable.go
@@ -34,15 +34,15 @@ var (
// shardedQueryable is an implementor of the Queryable interface.
type shardedQueryable struct {
- req Request
- handler Handler
+ req MetricsQueryRequest
+ handler MetricsQueryHandler
responseHeaders *responseHeadersTracker
}
// newShardedQueryable makes a new shardedQueryable. We expect a new queryable is created for each
// query, otherwise the response headers tracker doesn't work as expected, because it merges the
// headers for all queries run through the queryable and never reset them.
-func newShardedQueryable(req Request, next Handler) *shardedQueryable {
+func newShardedQueryable(req MetricsQueryRequest, next MetricsQueryHandler) *shardedQueryable {
return &shardedQueryable{
req: req,
handler: next,
@@ -65,8 +65,8 @@ func (q *shardedQueryable) getResponseHeaders() []*PrometheusResponseHeader {
// from the astmapper.EmbeddedQueriesMetricName metric label value and concurrently run embedded queries
// through the downstream handler.
type shardedQuerier struct {
- req Request
- handler Handler
+ req MetricsQueryRequest
+ handler MetricsQueryHandler
// Keep track of response headers received when running embedded queries.
responseHeaders *responseHeadersTracker
diff --git a/pkg/frontend/querymiddleware/sharded_queryable_test.go b/pkg/frontend/querymiddleware/sharded_queryable_test.go
index 0e5b0908bda..e72a60b7932 100644
--- a/pkg/frontend/querymiddleware/sharded_queryable_test.go
+++ b/pkg/frontend/querymiddleware/sharded_queryable_test.go
@@ -59,7 +59,7 @@ func TestShardedQuerier_Select(t *testing.T) {
// override handler func to assert new query has been substituted
q.handler = HandlerFunc(
- func(ctx context.Context, req Request) (Response, error) {
+ func(_ context.Context, req MetricsQueryRequest) (Response, error) {
require.Equal(t, `http_requests_total{cluster="prod"}`, req.GetQuery())
return expected, nil
},
@@ -218,7 +218,7 @@ func TestShardedQuerier_Select_ShouldConcurrentlyRunEmbeddedQueries(t *testing.T
downstreamWg := sync.WaitGroup{}
downstreamWg.Add(len(embeddedQueries))
- querier := mkShardedQuerier(HandlerFunc(func(ctx context.Context, req Request) (Response, error) {
+ querier := mkShardedQuerier(HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) {
// Wait until the downstream handler has been concurrently called for each embedded query.
downstreamWg.Done()
downstreamWg.Wait()
@@ -287,7 +287,7 @@ func TestShardedQueryable_GetResponseHeaders(t *testing.T) {
}, queryable.getResponseHeaders())
}
-func mkShardedQuerier(handler Handler) *shardedQuerier {
+func mkShardedQuerier(handler MetricsQueryHandler) *shardedQuerier {
return &shardedQuerier{req: &PrometheusRangeQueryRequest{}, handler: handler, responseHeaders: newResponseHeadersTracker()}
}
diff --git a/pkg/frontend/querymiddleware/split_and_cache.go b/pkg/frontend/querymiddleware/split_and_cache.go
index 67db76fcb21..fb9035ffe90 100644
--- a/pkg/frontend/querymiddleware/split_and_cache.go
+++ b/pkg/frontend/querymiddleware/split_and_cache.go
@@ -76,10 +76,10 @@ func newSplitAndCacheMiddlewareMetrics(reg prometheus.Registerer) *splitAndCache
return m
}
-// splitAndCacheMiddleware is a Middleware that can (optionally) split the query by interval
+// splitAndCacheMiddleware is a MetricsQueryMiddleware that can (optionally) split the query by interval
// and run split queries through the results cache.
type splitAndCacheMiddleware struct {
- next Handler
+ next MetricsQueryHandler
limits Limits
merger Merger
logger log.Logger
@@ -112,10 +112,10 @@ func newSplitAndCacheMiddleware(
extractor Extractor,
shouldCacheReq shouldCacheFn,
logger log.Logger,
- reg prometheus.Registerer) Middleware {
+ reg prometheus.Registerer) MetricsQueryMiddleware {
metrics := newSplitAndCacheMiddlewareMetrics(reg)
- return MiddlewareFunc(func(next Handler) Handler {
+ return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
return &splitAndCacheMiddleware{
splitEnabled: splitEnabled,
cacheEnabled: cacheEnabled,
@@ -134,7 +134,7 @@ func newSplitAndCacheMiddleware(
})
}
-func (s *splitAndCacheMiddleware) Do(ctx context.Context, req Request) (Response, error) {
+func (s *splitAndCacheMiddleware) Do(ctx context.Context, req MetricsQueryRequest) (Response, error) {
spanLog := spanlogger.FromContext(ctx, s.logger)
tenantIDs, err := tenant.TenantIDs(ctx)
if err != nil {
@@ -165,7 +165,7 @@ func (s *splitAndCacheMiddleware) Do(ctx context.Context, req Request) (Response
// Do not try to pick response from cache at all if the request is not cachable.
if cachable, reason := isRequestCachable(splitReq.orig, maxCacheTime, cacheUnalignedRequests, s.logger); !cachable {
level.Debug(spanLog).Log("msg", "skipping response cache as query is not cacheable", "query", splitReq.orig.GetQuery(), "reason", reason, "tenants", tenant.JoinTenantIDs(tenantIDs))
- splitReq.downstreamRequests = []Request{splitReq.orig}
+ splitReq.downstreamRequests = []MetricsQueryRequest{splitReq.orig}
s.metrics.queryResultCacheSkippedCount.WithLabelValues(reason).Inc()
continue
}
@@ -181,7 +181,7 @@ func (s *splitAndCacheMiddleware) Do(ctx context.Context, req Request) (Response
for lookupIdx, extents := range fetchedExtents {
if len(extents) == 0 {
// We just need to run the request as is because no part of it has been cached yet.
- lookupReqs[lookupIdx].downstreamRequests = []Request{lookupReqs[lookupIdx].orig}
+ lookupReqs[lookupIdx].downstreamRequests = []MetricsQueryRequest{lookupReqs[lookupIdx].orig}
continue
}
@@ -210,7 +210,7 @@ func (s *splitAndCacheMiddleware) Do(ctx context.Context, req Request) (Response
} else {
// Cache is disabled. We've just to execute the original request.
for _, splitReq := range splitReqs {
- splitReq.downstreamRequests = []Request{splitReq.orig}
+ splitReq.downstreamRequests = []MetricsQueryRequest{splitReq.orig}
}
}
@@ -304,8 +304,8 @@ func (s *splitAndCacheMiddleware) Do(ctx context.Context, req Request) (Response
return s.merger.MergeResponse(responses...)
}
-// splitRequestByInterval splits the given Request by configured interval. Returns the input request if splitting is disabled.
-func (s *splitAndCacheMiddleware) splitRequestByInterval(req Request) (splitRequests, error) {
+// splitRequestByInterval splits the given MetricsQueryRequest by configured interval. Returns the input request if splitting is disabled.
+func (s *splitAndCacheMiddleware) splitRequestByInterval(req MetricsQueryRequest) (splitRequests, error) {
if !s.splitEnabled {
return splitRequests{{orig: req}}, nil
}
@@ -468,7 +468,7 @@ func getTTLForExtent(now time.Time, ttl, ttlInOOOWindow, oooWindow time.Duration
// splitRequest holds information about a split request.
type splitRequest struct {
// The original split query.
- orig Request
+ orig MetricsQueryRequest
// The cache key for the request.
cacheKey string
@@ -481,7 +481,7 @@ type splitRequest struct {
// The requests/responses we send/receive to/from downstream. For a given request, its
// response is stored at the same index.
- downstreamRequests []Request
+ downstreamRequests []MetricsQueryRequest
downstreamResponses []Response
}
@@ -519,7 +519,7 @@ func (s *splitRequests) countDownstreamResponseBytes() int {
// prepareDownstreamRequests injects a unique ID and hints to all downstream requests and
// initialize downstream responses slice to have the same length of requests.
-func (s *splitRequests) prepareDownstreamRequests() []Request {
+func (s *splitRequests) prepareDownstreamRequests() []MetricsQueryRequest {
// Count the total number of downstream requests to run and build the hints we're going
// to attach to each request.
numDownstreamRequests := s.countDownstreamRequests()
@@ -532,7 +532,7 @@ func (s *splitRequests) prepareDownstreamRequests() []Request {
// ID intentionally start at 1 to detect any bug in case the default zero value is used.
nextReqID := int64(1)
- execReqs := make([]Request, 0, numDownstreamRequests)
+ execReqs := make([]MetricsQueryRequest, 0, numDownstreamRequests)
for _, splitReq := range *s {
for i := 0; i < len(splitReq.downstreamRequests); i++ {
splitReq.downstreamRequests[i] = splitReq.downstreamRequests[i].WithID(nextReqID).WithTotalQueriesHint(int32(numDownstreamRequests))
@@ -588,12 +588,12 @@ func (s *splitRequests) storeDownstreamResponses(responses []requestResponse) er
// requestResponse contains a request response and the respective request that was used.
type requestResponse struct {
- Request Request
+ Request MetricsQueryRequest
Response Response
}
// doRequests executes a list of requests in parallel.
-func doRequests(ctx context.Context, downstream Handler, reqs []Request) ([]requestResponse, error) {
+func doRequests(ctx context.Context, downstream MetricsQueryHandler, reqs []MetricsQueryRequest) ([]requestResponse, error) {
g, ctx := errgroup.WithContext(ctx)
mtx := sync.Mutex{}
resps := make([]requestResponse, 0, len(reqs))
@@ -626,14 +626,14 @@ func doRequests(ctx context.Context, downstream Handler, reqs []Request) ([]requ
return resps, g.Wait()
}
-func splitQueryByInterval(r Request, interval time.Duration) ([]Request, error) {
+func splitQueryByInterval(r MetricsQueryRequest, interval time.Duration) ([]MetricsQueryRequest, error) {
// Replace @ modifier function to their respective constant values in the query.
// This way subqueries will be evaluated at the same time as the parent query.
query, err := evaluateAtModifierFunction(r.GetQuery(), r.GetStart(), r.GetEnd())
if err != nil {
return nil, err
}
- var reqs []Request
+ var reqs []MetricsQueryRequest
for start := r.GetStart(); start <= r.GetEnd(); {
end := nextIntervalBoundary(start, r.GetStep(), interval)
if end > r.GetEnd() {
diff --git a/pkg/frontend/querymiddleware/split_and_cache_test.go b/pkg/frontend/querymiddleware/split_and_cache_test.go
index b577bee30d6..ccc7a660c7d 100644
--- a/pkg/frontend/querymiddleware/split_and_cache_test.go
+++ b/pkg/frontend/querymiddleware/split_and_cache_test.go
@@ -139,7 +139,7 @@ func TestSplitAndCacheMiddleware_SplitByInterval(t *testing.T) {
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
actualCount.Inc()
- req, err := codec.DecodeRequest(r.Context(), r)
+ req, err := codec.DecodeMetricsQueryRequest(r.Context(), r)
require.NoError(t, err)
if req.GetStart() == dayOneStartTime.Unix()*1000 {
@@ -181,7 +181,7 @@ func TestSplitAndCacheMiddleware_SplitByInterval(t *testing.T) {
)
// Chain middlewares together.
- middlewares := []Middleware{
+ middlewares := []MetricsQueryMiddleware{
newLimitsMiddleware(mockLimits{}, log.NewNopLogger()),
splitCacheMiddleware,
newAssertHintsMiddleware(t, &Hints{TotalQueries: 4}),
@@ -244,7 +244,7 @@ func TestSplitAndCacheMiddleware_ResultsCache(t *testing.T) {
mockLimits{maxCacheFreshness: 10 * time.Minute, resultsCacheTTL: resultsCacheTTL, resultsCacheOutOfOrderWindowTTL: resultsCacheLowerTTL},
newTestPrometheusCodec(),
cacheBackend,
- DefaultCacheKeyGenerator{Interval: day},
+ DefaultCacheKeyGenerator{interval: day},
PrometheusResponseExtractor{},
resultsCacheAlwaysEnabled,
log.NewNopLogger(),
@@ -293,13 +293,13 @@ func TestSplitAndCacheMiddleware_ResultsCache(t *testing.T) {
}
downstreamReqs := 0
- rc := mw.Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) {
+ rc := mw.Wrap(HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) {
downstreamReqs++
return expectedResponse, nil
}))
step := int64(120 * 1000)
- req := Request(&PrometheusRangeQueryRequest{
+ req := MetricsQueryRequest(&PrometheusRangeQueryRequest{
Path: "/api/v1/query_range",
Start: parseTimeRFC3339(t, "2021-10-15T10:00:00Z").Unix() * 1000,
End: parseTimeRFC3339(t, "2021-10-15T12:00:00Z").Unix() * 1000,
@@ -376,7 +376,7 @@ func TestSplitAndCacheMiddleware_ResultsCache_ShouldNotLookupCacheIfStepIsNotAli
mockLimits{maxCacheFreshness: 10 * time.Minute},
newTestPrometheusCodec(),
cacheBackend,
- DefaultCacheKeyGenerator{Interval: day},
+ DefaultCacheKeyGenerator{interval: day},
PrometheusResponseExtractor{},
resultsCacheAlwaysEnabled,
log.NewNopLogger(),
@@ -425,12 +425,12 @@ func TestSplitAndCacheMiddleware_ResultsCache_ShouldNotLookupCacheIfStepIsNotAli
}
downstreamReqs := 0
- rc := mw.Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) {
+ rc := mw.Wrap(HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) {
downstreamReqs++
return expectedResponse, nil
}))
- req := Request(&PrometheusRangeQueryRequest{
+ req := MetricsQueryRequest(&PrometheusRangeQueryRequest{
Path: "/api/v1/query_range",
Start: parseTimeRFC3339(t, "2021-10-15T10:00:00Z").Unix() * 1000,
End: parseTimeRFC3339(t, "2021-10-15T12:00:00Z").Unix() * 1000,
@@ -492,7 +492,7 @@ func TestSplitAndCacheMiddleware_ResultsCache_EnabledCachingOfStepUnalignedReque
limits,
newTestPrometheusCodec(),
cacheBackend,
- DefaultCacheKeyGenerator{Interval: day},
+ DefaultCacheKeyGenerator{interval: day},
PrometheusResponseExtractor{},
resultsCacheAlwaysEnabled,
log.NewNopLogger(),
@@ -518,12 +518,12 @@ func TestSplitAndCacheMiddleware_ResultsCache_EnabledCachingOfStepUnalignedReque
}
downstreamReqs := 0
- rc := mw.Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) {
+ rc := mw.Wrap(HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) {
downstreamReqs++
return expectedResponse, nil
}))
- req := Request(&PrometheusRangeQueryRequest{
+ req := MetricsQueryRequest(&PrometheusRangeQueryRequest{
Path: "/api/v1/query_range",
Start: parseTimeRFC3339(t, "2021-10-15T10:00:00Z").Unix() * 1000,
End: parseTimeRFC3339(t, "2021-10-15T12:00:00Z").Unix() * 1000,
@@ -643,7 +643,7 @@ func TestSplitAndCacheMiddleware_ResultsCache_ShouldNotCacheRequestEarlierThanMa
for testName, testData := range tests {
t.Run(testName, func(t *testing.T) {
cacheBackend := cache.NewMockCache()
- keyGenerator := DefaultCacheKeyGenerator{Interval: day}
+ keyGenerator := DefaultCacheKeyGenerator{interval: day}
reg := prometheus.NewPedanticRegistry()
mw := newSplitAndCacheMiddleware(
@@ -661,7 +661,7 @@ func TestSplitAndCacheMiddleware_ResultsCache_ShouldNotCacheRequestEarlierThanMa
)
calls := 0
- rc := mw.Wrap(HandlerFunc(func(_ context.Context, r Request) (Response, error) {
+ rc := mw.Wrap(HandlerFunc(func(_ context.Context, r MetricsQueryRequest) (Response, error) {
calls++
// Check the downstream request. We only check the 1st request because the subsequent
@@ -675,7 +675,7 @@ func TestSplitAndCacheMiddleware_ResultsCache_ShouldNotCacheRequestEarlierThanMa
}))
ctx := user.InjectOrgID(context.Background(), userID)
- req := Request(&PrometheusRangeQueryRequest{
+ req := MetricsQueryRequest(&PrometheusRangeQueryRequest{
Path: "/api/v1/query_range",
Start: testData.queryStartTime.Unix() * 1000,
End: testData.queryEndTime.Unix() * 1000,
@@ -683,7 +683,7 @@ func TestSplitAndCacheMiddleware_ResultsCache_ShouldNotCacheRequestEarlierThanMa
Query: `{__name__=~".+"}`,
})
- // Request should result in a query.
+	// The request should result in a query.
resp, err := rc.Do(ctx, req)
require.NoError(t, err)
require.Equal(t, 1, calls)
@@ -813,7 +813,7 @@ func TestSplitAndCacheMiddleware_ResultsCacheFuzzy(t *testing.T) {
}
// Generate some random requests.
- reqs := make([]Request, 0, numQueries)
+ reqs := make([]MetricsQueryRequest, 0, numQueries)
for q := 0; q < numQueries; q++ {
// Generate a random time range within min/max time.
startTime := minTime.Add(time.Duration(rnd.Int63n(maxTime.Sub(minTime).Milliseconds())) * time.Millisecond)
@@ -864,7 +864,7 @@ func TestSplitAndCacheMiddleware_ResultsCacheFuzzy(t *testing.T) {
},
newTestPrometheusCodec(),
cache.NewMockCache(),
- DefaultCacheKeyGenerator{Interval: day},
+ DefaultCacheKeyGenerator{interval: day},
PrometheusResponseExtractor{},
resultsCacheAlwaysEnabled,
log.NewNopLogger(),
@@ -889,7 +889,7 @@ func TestSplitAndCacheMiddleware_ResultsCache_ExtentsEdgeCases(t *testing.T) {
now := time.Now().UnixMilli()
tests := map[string]struct {
- req Request
+ req MetricsQueryRequest
cachedExtents []Extent
expectedUpdatedExtents bool
expectedCachedExtents []Extent
@@ -1134,7 +1134,7 @@ func TestSplitAndCacheMiddleware_ResultsCache_ExtentsEdgeCases(t *testing.T) {
t.Run(testName, func(t *testing.T) {
ctx := user.InjectOrgID(context.Background(), userID)
cacheBackend := cache.NewInstrumentedMockCache()
- keyGenerator := DefaultCacheKeyGenerator{Interval: day}
+ keyGenerator := DefaultCacheKeyGenerator{interval: day}
mw := newSplitAndCacheMiddleware(
false, // No splitting.
@@ -1148,7 +1148,7 @@ func TestSplitAndCacheMiddleware_ResultsCache_ExtentsEdgeCases(t *testing.T) {
resultsCacheAlwaysEnabled,
log.NewNopLogger(),
prometheus.NewPedanticRegistry(),
- ).Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) {
+ ).Wrap(HandlerFunc(func(_ context.Context, req MetricsQueryRequest) (Response, error) {
return mkAPIResponse(req.GetStart(), req.GetEnd(), req.GetStep()), nil
})).(*splitAndCacheMiddleware)
mw.currentTime = func() time.Time { return time.UnixMilli(now) }
@@ -1192,7 +1192,7 @@ func TestSplitAndCacheMiddleware_StoreAndFetchCacheExtents(t *testing.T) {
},
newTestPrometheusCodec(),
cacheBackend,
- DefaultCacheKeyGenerator{Interval: day},
+ DefaultCacheKeyGenerator{interval: day},
PrometheusResponseExtractor{},
resultsCacheAlwaysEnabled,
log.NewNopLogger(),
@@ -1273,7 +1273,7 @@ func TestSplitAndCacheMiddleware_WrapMultipleTimes(t *testing.T) {
mockLimits{},
newTestPrometheusCodec(),
cache.NewMockCache(),
- DefaultCacheKeyGenerator{Interval: day},
+ DefaultCacheKeyGenerator{interval: day},
PrometheusResponseExtractor{},
resultsCacheAlwaysEnabled,
log.NewNopLogger(),
@@ -1289,7 +1289,7 @@ func TestSplitAndCacheMiddleware_WrapMultipleTimes(t *testing.T) {
func TestSplitRequests_prepareDownstreamRequests(t *testing.T) {
tests := map[string]struct {
input splitRequests
- expected []Request
+ expected []MetricsQueryRequest
}{
"should return an empty slice on no downstream requests": {
input: nil,
@@ -1297,11 +1297,11 @@ func TestSplitRequests_prepareDownstreamRequests(t *testing.T) {
},
"should inject ID and hints on downstream requests and return them": {
input: splitRequests{
- {downstreamRequests: []Request{&PrometheusRangeQueryRequest{Start: 1}, &PrometheusRangeQueryRequest{Start: 2}}},
- {downstreamRequests: []Request{}},
- {downstreamRequests: []Request{&PrometheusRangeQueryRequest{Start: 3}}},
+ {downstreamRequests: []MetricsQueryRequest{&PrometheusRangeQueryRequest{Start: 1}, &PrometheusRangeQueryRequest{Start: 2}}},
+ {downstreamRequests: []MetricsQueryRequest{}},
+ {downstreamRequests: []MetricsQueryRequest{&PrometheusRangeQueryRequest{Start: 3}}},
},
- expected: []Request{
+ expected: []MetricsQueryRequest{
(&PrometheusRangeQueryRequest{Start: 1}).WithID(1).WithTotalQueriesHint(3),
(&PrometheusRangeQueryRequest{Start: 2}).WithID(2).WithTotalQueriesHint(3),
(&PrometheusRangeQueryRequest{Start: 3}).WithID(3).WithTotalQueriesHint(3),
@@ -1335,24 +1335,24 @@ func TestSplitRequests_storeDownstreamResponses(t *testing.T) {
}{
"should do nothing on no downstream requests": {
requests: splitRequests{
- {downstreamRequests: []Request{}},
- {downstreamRequests: []Request{}},
+ {downstreamRequests: []MetricsQueryRequest{}},
+ {downstreamRequests: []MetricsQueryRequest{}},
},
responses: nil,
expected: splitRequests{
- {downstreamRequests: []Request{}},
- {downstreamRequests: []Request{}},
+ {downstreamRequests: []MetricsQueryRequest{}},
+ {downstreamRequests: []MetricsQueryRequest{}},
},
},
"should associate downstream responses to requests": {
requests: splitRequests{{
- downstreamRequests: []Request{&PrometheusRangeQueryRequest{Start: 1, Id: 1}, &PrometheusRangeQueryRequest{Start: 2, Id: 2}},
+ downstreamRequests: []MetricsQueryRequest{&PrometheusRangeQueryRequest{Start: 1, Id: 1}, &PrometheusRangeQueryRequest{Start: 2, Id: 2}},
downstreamResponses: []Response{nil, nil},
}, {
- downstreamRequests: []Request{},
+ downstreamRequests: []MetricsQueryRequest{},
downstreamResponses: []Response{},
}, {
- downstreamRequests: []Request{&PrometheusRangeQueryRequest{Start: 3, Id: 3}},
+ downstreamRequests: []MetricsQueryRequest{&PrometheusRangeQueryRequest{Start: 3, Id: 3}},
downstreamResponses: []Response{nil},
}},
responses: []requestResponse{{
@@ -1366,22 +1366,22 @@ func TestSplitRequests_storeDownstreamResponses(t *testing.T) {
Response: &PrometheusResponse{Status: "response-2"},
}},
expected: splitRequests{{
- downstreamRequests: []Request{&PrometheusRangeQueryRequest{Start: 1, Id: 1}, &PrometheusRangeQueryRequest{Start: 2, Id: 2}},
+ downstreamRequests: []MetricsQueryRequest{&PrometheusRangeQueryRequest{Start: 1, Id: 1}, &PrometheusRangeQueryRequest{Start: 2, Id: 2}},
downstreamResponses: []Response{&PrometheusResponse{Status: "response-1"}, &PrometheusResponse{Status: "response-2"}},
}, {
- downstreamRequests: []Request{},
+ downstreamRequests: []MetricsQueryRequest{},
downstreamResponses: []Response{},
}, {
- downstreamRequests: []Request{&PrometheusRangeQueryRequest{Start: 3, Id: 3}},
+ downstreamRequests: []MetricsQueryRequest{&PrometheusRangeQueryRequest{Start: 3, Id: 3}},
downstreamResponses: []Response{&PrometheusResponse{Status: "response-3"}},
}},
},
"should return error if a downstream response is missing": {
requests: splitRequests{{
- downstreamRequests: []Request{&PrometheusRangeQueryRequest{Start: 1, Id: 1}, &PrometheusRangeQueryRequest{Start: 2, Id: 2}},
+ downstreamRequests: []MetricsQueryRequest{&PrometheusRangeQueryRequest{Start: 1, Id: 1}, &PrometheusRangeQueryRequest{Start: 2, Id: 2}},
downstreamResponses: []Response{nil, nil},
}, {
- downstreamRequests: []Request{&PrometheusRangeQueryRequest{Start: 3, Id: 3}},
+ downstreamRequests: []MetricsQueryRequest{&PrometheusRangeQueryRequest{Start: 3, Id: 3}},
downstreamResponses: []Response{nil},
}},
responses: []requestResponse{{
@@ -1395,10 +1395,10 @@ func TestSplitRequests_storeDownstreamResponses(t *testing.T) {
},
"should return error if multiple downstream responses have the same ID": {
requests: splitRequests{{
- downstreamRequests: []Request{&PrometheusRangeQueryRequest{Start: 1, Id: 1}, &PrometheusRangeQueryRequest{Start: 2, Id: 2}},
+ downstreamRequests: []MetricsQueryRequest{&PrometheusRangeQueryRequest{Start: 1, Id: 1}, &PrometheusRangeQueryRequest{Start: 2, Id: 2}},
downstreamResponses: []Response{nil, nil},
}, {
- downstreamRequests: []Request{&PrometheusRangeQueryRequest{Start: 3, Id: 3}},
+ downstreamRequests: []MetricsQueryRequest{&PrometheusRangeQueryRequest{Start: 3, Id: 3}},
downstreamResponses: []Response{nil},
}},
responses: []requestResponse{{
@@ -1412,10 +1412,10 @@ func TestSplitRequests_storeDownstreamResponses(t *testing.T) {
},
"should return error if extra downstream responses are requested to be stored": {
requests: splitRequests{{
- downstreamRequests: []Request{&PrometheusRangeQueryRequest{Start: 1, Id: 1}, &PrometheusRangeQueryRequest{Start: 2, Id: 2}},
+ downstreamRequests: []MetricsQueryRequest{&PrometheusRangeQueryRequest{Start: 1, Id: 1}, &PrometheusRangeQueryRequest{Start: 2, Id: 2}},
downstreamResponses: []Response{nil, nil},
}, {
- downstreamRequests: []Request{&PrometheusRangeQueryRequest{Start: 3, Id: 3}},
+ downstreamRequests: []MetricsQueryRequest{&PrometheusRangeQueryRequest{Start: 3, Id: 3}},
downstreamResponses: []Response{nil},
}},
responses: []requestResponse{{
@@ -1503,8 +1503,8 @@ func jsonEncodePrometheusResponse(t *testing.T, res *PrometheusResponse) string
return string(encoded)
}
-func newAssertHintsMiddleware(t *testing.T, expected *Hints) Middleware {
- return MiddlewareFunc(func(next Handler) Handler {
+func newAssertHintsMiddleware(t *testing.T, expected *Hints) MetricsQueryMiddleware {
+ return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
return &assertHintsMiddleware{
next: next,
t: t,
@@ -1514,26 +1514,26 @@ func newAssertHintsMiddleware(t *testing.T, expected *Hints) Middleware {
}
type assertHintsMiddleware struct {
- next Handler
+ next MetricsQueryHandler
t *testing.T
expected *Hints
}
-func (m *assertHintsMiddleware) Do(ctx context.Context, req Request) (Response, error) {
+func (m *assertHintsMiddleware) Do(ctx context.Context, req MetricsQueryRequest) (Response, error) {
assert.Equal(m.t, m.expected, req.GetHints())
return m.next.Do(ctx, req)
}
type roundTripper struct {
- handler Handler
+ handler MetricsQueryHandler
codec Codec
}
// newRoundTripper merges a set of middlewares into an handler, then inject it into the `next` roundtripper
// using the codec to translate requests and responses.
-func newRoundTripper(next http.RoundTripper, codec Codec, logger log.Logger, middlewares ...Middleware) http.RoundTripper {
+func newRoundTripper(next http.RoundTripper, codec Codec, logger log.Logger, middlewares ...MetricsQueryMiddleware) http.RoundTripper {
return roundTripper{
- handler: MergeMiddlewares(middlewares...).Wrap(roundTripperHandler{
+ handler: MergeMetricsQueryMiddlewares(middlewares...).Wrap(roundTripperHandler{
logger: logger,
next: next,
codec: codec,
@@ -1543,7 +1543,7 @@ func newRoundTripper(next http.RoundTripper, codec Codec, logger log.Logger, mid
}
func (q roundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
- request, err := q.codec.DecodeRequest(r.Context(), r)
+ request, err := q.codec.DecodeMetricsQueryRequest(r.Context(), r)
if err != nil {
return nil, err
}
@@ -1602,41 +1602,41 @@ func TestNextIntervalBoundary(t *testing.T) {
func TestSplitQueryByInterval(t *testing.T) {
for i, tc := range []struct {
- input Request
- expected []Request
+ input MetricsQueryRequest
+ expected []MetricsQueryRequest
interval time.Duration
}{
{
input: &PrometheusRangeQueryRequest{Start: 0, End: 60 * 60 * seconds, Step: 15 * seconds, Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: 0, End: 60 * 60 * seconds, Step: 15 * seconds, Query: "foo"},
},
interval: day,
},
{
input: &PrometheusRangeQueryRequest{Start: 0, End: 60 * 60 * seconds, Step: 15 * seconds, Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: 0, End: 60 * 60 * seconds, Step: 15 * seconds, Query: "foo"},
},
interval: 3 * time.Hour,
},
{
input: &PrometheusRangeQueryRequest{Start: 0, End: 24 * 3600 * seconds, Step: 15 * seconds, Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: 0, End: 24 * 3600 * seconds, Step: 15 * seconds, Query: "foo"},
},
interval: day,
},
{
input: &PrometheusRangeQueryRequest{Start: 0, End: 3 * 3600 * seconds, Step: 15 * seconds, Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: 0, End: 3 * 3600 * seconds, Step: 15 * seconds, Query: "foo"},
},
interval: 3 * time.Hour,
},
{
input: &PrometheusRangeQueryRequest{Start: 0, End: 2 * 24 * 3600 * seconds, Step: 15 * seconds, Query: "foo @ start()"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: 0, End: (24 * 3600 * seconds) - (15 * seconds), Step: 15 * seconds, Query: "foo @ 0.000"},
&PrometheusRangeQueryRequest{Start: 24 * 3600 * seconds, End: 2 * 24 * 3600 * seconds, Step: 15 * seconds, Query: "foo @ 0.000"},
},
@@ -1644,7 +1644,7 @@ func TestSplitQueryByInterval(t *testing.T) {
},
{
input: &PrometheusRangeQueryRequest{Start: 0, End: 2 * 3 * 3600 * seconds, Step: 15 * seconds, Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: 0, End: (3 * 3600 * seconds) - (15 * seconds), Step: 15 * seconds, Query: "foo"},
&PrometheusRangeQueryRequest{Start: 3 * 3600 * seconds, End: 2 * 3 * 3600 * seconds, Step: 15 * seconds, Query: "foo"},
},
@@ -1652,7 +1652,7 @@ func TestSplitQueryByInterval(t *testing.T) {
},
{
input: &PrometheusRangeQueryRequest{Start: 3 * 3600 * seconds, End: 3 * 24 * 3600 * seconds, Step: 15 * seconds, Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: 3 * 3600 * seconds, End: (24 * 3600 * seconds) - (15 * seconds), Step: 15 * seconds, Query: "foo"},
&PrometheusRangeQueryRequest{Start: 24 * 3600 * seconds, End: (2 * 24 * 3600 * seconds) - (15 * seconds), Step: 15 * seconds, Query: "foo"},
&PrometheusRangeQueryRequest{Start: 2 * 24 * 3600 * seconds, End: 3 * 24 * 3600 * seconds, Step: 15 * seconds, Query: "foo"},
@@ -1661,7 +1661,7 @@ func TestSplitQueryByInterval(t *testing.T) {
},
{
input: &PrometheusRangeQueryRequest{Start: 2 * 3600 * seconds, End: 3 * 3 * 3600 * seconds, Step: 15 * seconds, Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: 2 * 3600 * seconds, End: (3 * 3600 * seconds) - (15 * seconds), Step: 15 * seconds, Query: "foo"},
&PrometheusRangeQueryRequest{Start: 3 * 3600 * seconds, End: (2 * 3 * 3600 * seconds) - (15 * seconds), Step: 15 * seconds, Query: "foo"},
&PrometheusRangeQueryRequest{Start: 2 * 3 * 3600 * seconds, End: 3 * 3 * 3600 * seconds, Step: 15 * seconds, Query: "foo"},
@@ -1670,14 +1670,14 @@ func TestSplitQueryByInterval(t *testing.T) {
},
{
input: &PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-14T23:48:00Z"), End: timeToMillis(t, "2021-10-15T00:03:00Z"), Step: 5 * time.Minute.Milliseconds(), Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-14T23:48:00Z"), End: timeToMillis(t, "2021-10-15T00:03:00Z"), Step: 5 * time.Minute.Milliseconds(), Query: "foo"},
},
interval: day,
},
{
input: &PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-14T23:48:00Z"), End: timeToMillis(t, "2021-10-15T00:00:00Z"), Step: 6 * time.Minute.Milliseconds(), Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-14T23:48:00Z"), End: timeToMillis(t, "2021-10-14T23:54:00Z"), Step: 6 * time.Minute.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-15T00:00:00Z"), End: timeToMillis(t, "2021-10-15T00:00:00Z"), Step: 6 * time.Minute.Milliseconds(), Query: "foo"},
},
@@ -1685,7 +1685,7 @@ func TestSplitQueryByInterval(t *testing.T) {
},
{
input: &PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-14T22:00:00Z"), End: timeToMillis(t, "2021-10-17T22:00:00Z"), Step: 24 * time.Hour.Milliseconds(), Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-14T22:00:00Z"), End: timeToMillis(t, "2021-10-14T22:00:00Z"), Step: 24 * time.Hour.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-15T22:00:00Z"), End: timeToMillis(t, "2021-10-15T22:00:00Z"), Step: 24 * time.Hour.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-16T22:00:00Z"), End: timeToMillis(t, "2021-10-16T22:00:00Z"), Step: 24 * time.Hour.Milliseconds(), Query: "foo"},
@@ -1695,7 +1695,7 @@ func TestSplitQueryByInterval(t *testing.T) {
},
{
input: &PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-15T00:00:00Z"), End: timeToMillis(t, "2021-10-18T00:00:00Z"), Step: 24 * time.Hour.Milliseconds(), Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-15T00:00:00Z"), End: timeToMillis(t, "2021-10-15T00:00:00Z"), Step: 24 * time.Hour.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-16T00:00:00Z"), End: timeToMillis(t, "2021-10-16T00:00:00Z"), Step: 24 * time.Hour.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-17T00:00:00Z"), End: timeToMillis(t, "2021-10-17T00:00:00Z"), Step: 24 * time.Hour.Milliseconds(), Query: "foo"},
@@ -1705,7 +1705,7 @@ func TestSplitQueryByInterval(t *testing.T) {
},
{
input: &PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-15T22:00:00Z"), End: timeToMillis(t, "2021-10-22T04:00:00Z"), Step: 30 * time.Hour.Milliseconds(), Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-15T22:00:00Z"), End: timeToMillis(t, "2021-10-15T22:00:00Z"), Step: 30 * time.Hour.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-17T04:00:00Z"), End: timeToMillis(t, "2021-10-17T04:00:00Z"), Step: 30 * time.Hour.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-18T10:00:00Z"), End: timeToMillis(t, "2021-10-18T10:00:00Z"), Step: 30 * time.Hour.Milliseconds(), Query: "foo"},
@@ -1717,7 +1717,7 @@ func TestSplitQueryByInterval(t *testing.T) {
},
{
input: &PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-15T06:00:00Z"), End: timeToMillis(t, "2021-10-17T14:00:00Z"), Step: 12 * time.Hour.Milliseconds(), Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-15T06:00:00Z"), End: timeToMillis(t, "2021-10-15T18:00:00Z"), Step: 12 * time.Hour.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-16T06:00:00Z"), End: timeToMillis(t, "2021-10-16T18:00:00Z"), Step: 12 * time.Hour.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-17T06:00:00Z"), End: timeToMillis(t, "2021-10-17T14:00:00Z"), Step: 12 * time.Hour.Milliseconds(), Query: "foo"},
@@ -1726,7 +1726,7 @@ func TestSplitQueryByInterval(t *testing.T) {
},
{
input: &PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-15T06:00:00Z"), End: timeToMillis(t, "2021-10-17T18:00:00Z"), Step: 12 * time.Hour.Milliseconds(), Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-15T06:00:00Z"), End: timeToMillis(t, "2021-10-15T18:00:00Z"), Step: 12 * time.Hour.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-16T06:00:00Z"), End: timeToMillis(t, "2021-10-16T18:00:00Z"), Step: 12 * time.Hour.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-17T06:00:00Z"), End: timeToMillis(t, "2021-10-17T18:00:00Z"), Step: 12 * time.Hour.Milliseconds(), Query: "foo"},
@@ -1735,7 +1735,7 @@ func TestSplitQueryByInterval(t *testing.T) {
},
{
input: &PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-15T06:00:00Z"), End: timeToMillis(t, "2021-10-17T18:00:00Z"), Step: 10 * time.Hour.Milliseconds(), Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-15T06:00:00Z"), End: timeToMillis(t, "2021-10-15T16:00:00Z"), Step: 10 * time.Hour.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-16T02:00:00Z"), End: timeToMillis(t, "2021-10-16T22:00:00Z"), Step: 10 * time.Hour.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-17T08:00:00Z"), End: timeToMillis(t, "2021-10-17T18:00:00Z"), Step: 10 * time.Hour.Milliseconds(), Query: "foo"},
@@ -1744,7 +1744,7 @@ func TestSplitQueryByInterval(t *testing.T) {
},
{
input: &PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-15T06:00:00Z"), End: timeToMillis(t, "2021-10-17T08:00:00Z"), Step: 10 * time.Hour.Milliseconds(), Query: "foo"},
- expected: []Request{
+ expected: []MetricsQueryRequest{
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-15T06:00:00Z"), End: timeToMillis(t, "2021-10-15T16:00:00Z"), Step: 10 * time.Hour.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-16T02:00:00Z"), End: timeToMillis(t, "2021-10-16T22:00:00Z"), Step: 10 * time.Hour.Milliseconds(), Query: "foo"},
&PrometheusRangeQueryRequest{Start: timeToMillis(t, "2021-10-17T08:00:00Z"), End: timeToMillis(t, "2021-10-17T08:00:00Z"), Step: 10 * time.Hour.Milliseconds(), Query: "foo"},
diff --git a/pkg/frontend/querymiddleware/split_by_instant_interval.go b/pkg/frontend/querymiddleware/split_by_instant_interval.go
index 53e6c7ff8d7..fde1598defe 100644
--- a/pkg/frontend/querymiddleware/split_by_instant_interval.go
+++ b/pkg/frontend/querymiddleware/split_by_instant_interval.go
@@ -28,9 +28,9 @@ const (
skippedReasonMappingFailed = "mapping-failed"
)
-// splitInstantQueryByIntervalMiddleware is a Middleware that can (optionally) split the instant query by splitInterval
+// splitInstantQueryByIntervalMiddleware is a MetricsQueryMiddleware that can (optionally) split the instant query by splitInterval
type splitInstantQueryByIntervalMiddleware struct {
- next Handler
+ next MetricsQueryHandler
limits Limits
logger log.Logger
@@ -86,10 +86,10 @@ func newSplitInstantQueryByIntervalMiddleware(
limits Limits,
logger log.Logger,
engine *promql.Engine,
- registerer prometheus.Registerer) Middleware {
+ registerer prometheus.Registerer) MetricsQueryMiddleware {
metrics := newInstantQuerySplittingMetrics(registerer)
- return MiddlewareFunc(func(next Handler) Handler {
+ return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
return &splitInstantQueryByIntervalMiddleware{
next: next,
limits: limits,
@@ -100,19 +100,19 @@ func newSplitInstantQueryByIntervalMiddleware(
})
}
-func (s *splitInstantQueryByIntervalMiddleware) Do(ctx context.Context, req Request) (Response, error) {
+func (s *splitInstantQueryByIntervalMiddleware) Do(ctx context.Context, req MetricsQueryRequest) (Response, error) {
// Log the instant query and its timestamp in every error log, so that we have more information for debugging failures.
logger := log.With(s.logger, "query", req.GetQuery(), "query_timestamp", req.GetStart())
spanLog, ctx := spanlogger.NewWithLogger(ctx, logger, "splitInstantQueryByIntervalMiddleware.Do")
defer spanLog.Span.Finish()
- tenantsIds, err := tenant.TenantIDs(ctx)
+ tenantIDs, err := tenant.TenantIDs(ctx)
if err != nil {
return nil, apierror.New(apierror.TypeBadData, err.Error())
}
- splitInterval := s.getSplitIntervalForQuery(tenantsIds, req, spanLog)
+ splitInterval := s.getSplitIntervalForQuery(tenantIDs, req, spanLog)
if splitInterval <= 0 {
spanLog.DebugLog("msg", "query splitting is disabled for this query or tenant")
return s.next.Do(ctx, req)
@@ -201,13 +201,13 @@ func (s *splitInstantQueryByIntervalMiddleware) Do(ctx context.Context, req Requ
}
// getSplitIntervalForQuery calculates and return the split interval that should be used to run the instant query.
-func (s *splitInstantQueryByIntervalMiddleware) getSplitIntervalForQuery(tenantsIds []string, r Request, spanLog *spanlogger.SpanLogger) time.Duration {
+func (s *splitInstantQueryByIntervalMiddleware) getSplitIntervalForQuery(tenantIDs []string, r MetricsQueryRequest, spanLog *spanlogger.SpanLogger) time.Duration {
// Check if splitting is disabled for the given request.
if r.GetOptions().InstantSplitDisabled {
return 0
}
- splitInterval := validation.SmallestPositiveNonZeroDurationPerTenant(tenantsIds, s.limits.SplitInstantQueriesByInterval)
+ splitInterval := validation.SmallestPositiveNonZeroDurationPerTenant(tenantIDs, s.limits.SplitInstantQueriesByInterval)
if splitInterval <= 0 {
return 0
}
@@ -217,7 +217,7 @@ func (s *splitInstantQueryByIntervalMiddleware) getSplitIntervalForQuery(tenants
splitInterval = time.Duration(r.GetOptions().InstantSplitInterval)
}
- spanLog.DebugLog("msg", "getting split instant query interval", "tenantsIds", tenantsIds, "split interval", splitInterval)
+ spanLog.DebugLog("msg", "getting split instant query interval", "tenantIDs", tenantIDs, "split interval", splitInterval)
return splitInterval
}
diff --git a/pkg/frontend/querymiddleware/split_by_instant_interval_test.go b/pkg/frontend/querymiddleware/split_by_instant_interval_test.go
index add48be1a3f..3d1bb322194 100644
--- a/pkg/frontend/querymiddleware/split_by_instant_interval_test.go
+++ b/pkg/frontend/querymiddleware/split_by_instant_interval_test.go
@@ -506,7 +506,7 @@ func TestInstantQuerySplittingCorrectness(t *testing.T) {
t.Run(testName, func(t *testing.T) {
t.Parallel()
- reqs := []Request{
+ reqs := []MetricsQueryRequest{
&PrometheusInstantQueryRequest{
Path: "/query",
Time: util.TimeToMillis(end),
diff --git a/pkg/frontend/querymiddleware/stats.go b/pkg/frontend/querymiddleware/stats.go
index 1453045f191..4e4c32e779f 100644
--- a/pkg/frontend/querymiddleware/stats.go
+++ b/pkg/frontend/querymiddleware/stats.go
@@ -25,10 +25,10 @@ type queryStatsMiddleware struct {
regexpMatcherCount prometheus.Counter
regexpMatcherOptimizedCount prometheus.Counter
consistencyCounter *prometheus.CounterVec
- next Handler
+ next MetricsQueryHandler
}
-func newQueryStatsMiddleware(reg prometheus.Registerer, engine *promql.Engine) Middleware {
+func newQueryStatsMiddleware(reg prometheus.Registerer, engine *promql.Engine) MetricsQueryMiddleware {
nonAlignedQueries := promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "cortex_query_frontend_non_step_aligned_queries_total",
Help: "Total queries sent that are not step aligned.",
@@ -46,7 +46,7 @@ func newQueryStatsMiddleware(reg prometheus.Registerer, engine *promql.Engine) M
Help: "Total number of queries that explicitly request a level of consistency.",
}, []string{"user", "consistency"})
- return MiddlewareFunc(func(next Handler) Handler {
+ return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
return &queryStatsMiddleware{
engine: engine,
nonAlignedQueries: nonAlignedQueries,
@@ -58,7 +58,7 @@ func newQueryStatsMiddleware(reg prometheus.Registerer, engine *promql.Engine) M
})
}
-func (s queryStatsMiddleware) Do(ctx context.Context, req Request) (Response, error) {
+func (s queryStatsMiddleware) Do(ctx context.Context, req MetricsQueryRequest) (Response, error) {
if !isRequestStepAligned(req) {
s.nonAlignedQueries.Inc()
}
@@ -70,7 +70,7 @@ func (s queryStatsMiddleware) Do(ctx context.Context, req Request) (Response, er
return s.next.Do(ctx, req)
}
-func (s queryStatsMiddleware) trackRegexpMatchers(req Request) {
+func (s queryStatsMiddleware) trackRegexpMatchers(req MetricsQueryRequest) {
expr, err := parser.ParseExpr(req.GetQuery())
if err != nil {
return
@@ -93,7 +93,7 @@ var queryStatsErrQueryable = &storage.MockQueryable{MockQuerier: &storage.MockQu
return storage.ErrSeriesSet(errors.New("cannot use query stats queryable for running queries"))
}}}
-func (s queryStatsMiddleware) populateQueryDetails(ctx context.Context, req Request) {
+func (s queryStatsMiddleware) populateQueryDetails(ctx context.Context, req MetricsQueryRequest) {
details := QueryDetailsFromContext(ctx)
if details == nil {
return
diff --git a/pkg/frontend/querymiddleware/stats_test.go b/pkg/frontend/querymiddleware/stats_test.go
index 86a185a1e30..84b3463c493 100644
--- a/pkg/frontend/querymiddleware/stats_test.go
+++ b/pkg/frontend/querymiddleware/stats_test.go
@@ -23,7 +23,7 @@ func Test_queryStatsMiddleware_Do(t *testing.T) {
const tenantID = "test"
type args struct {
ctx context.Context
- req Request
+ req MetricsQueryRequest
}
tests := []struct {
name string
diff --git a/pkg/frontend/querymiddleware/step_align.go b/pkg/frontend/querymiddleware/step_align.go
index f8f7bc5676a..0db07126970 100644
--- a/pkg/frontend/querymiddleware/step_align.go
+++ b/pkg/frontend/querymiddleware/step_align.go
@@ -18,7 +18,7 @@ import (
)
type stepAlignMiddleware struct {
- next Handler
+ next MetricsQueryHandler
limits Limits
logger log.Logger
aligned *prometheus.CounterVec
@@ -26,13 +26,13 @@ type stepAlignMiddleware struct {
// newStepAlignMiddleware creates a middleware that aligns the start and end of request to the step to
// improve the cacheability of the query results based on per-tenant configuration.
-func newStepAlignMiddleware(limits Limits, logger log.Logger, registerer prometheus.Registerer) Middleware {
+func newStepAlignMiddleware(limits Limits, logger log.Logger, registerer prometheus.Registerer) MetricsQueryMiddleware {
aligned := promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{
Name: "cortex_query_frontend_queries_step_aligned_total",
Help: "Number of queries whose start or end times have been adjusted to be step-aligned.",
}, []string{"user"})
- return MiddlewareFunc(func(next Handler) Handler {
+ return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
return &stepAlignMiddleware{
next: next,
limits: limits,
@@ -42,7 +42,7 @@ func newStepAlignMiddleware(limits Limits, logger log.Logger, registerer prometh
})
}
-func (s *stepAlignMiddleware) Do(ctx context.Context, r Request) (Response, error) {
+func (s *stepAlignMiddleware) Do(ctx context.Context, r MetricsQueryRequest) (Response, error) {
tenants, err := tenant.TenantIDs(ctx)
if err != nil {
return s.next.Do(ctx, r)
@@ -75,9 +75,9 @@ func (s *stepAlignMiddleware) Do(ctx context.Context, r Request) (Response, erro
return s.next.Do(ctx, r)
}
-// isRequestStepAligned returns whether the Request start and end timestamps are aligned
+// isRequestStepAligned returns whether the MetricsQueryRequest start and end timestamps are aligned
// with the step.
-func isRequestStepAligned(req Request) bool {
+func isRequestStepAligned(req MetricsQueryRequest) bool {
if req.GetStep() == 0 {
return true
}
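A minimal sketch of how a middleware composes under the renamed types, for orientation while reviewing the mechanical Handler → MetricsQueryHandler renames in this file and the ones that follow. It assumes the interfaces keep the shape used throughout this diff (Do(ctx, MetricsQueryRequest) (Response, error), MetricsQueryMiddlewareFunc, HandlerFunc); the middleware name itself is hypothetical.

func passthroughMiddleware() MetricsQueryMiddleware {
	// Hypothetical middleware, shown only to illustrate the renamed types.
	return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler {
		return HandlerFunc(func(ctx context.Context, req MetricsQueryRequest) (Response, error) {
			// Inspect or rewrite req here, then delegate to the wrapped handler.
			return next.Do(ctx, req)
		})
	})
}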
diff --git a/pkg/frontend/querymiddleware/step_align_test.go b/pkg/frontend/querymiddleware/step_align_test.go
index eb74be335b7..17a407eb8d5 100644
--- a/pkg/frontend/querymiddleware/step_align_test.go
+++ b/pkg/frontend/querymiddleware/step_align_test.go
@@ -52,7 +52,7 @@ func TestStepAlignMiddleware_SingleUser(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
var result *PrometheusRangeQueryRequest
- next := HandlerFunc(func(_ context.Context, req Request) (Response, error) {
+ next := HandlerFunc(func(_ context.Context, req MetricsQueryRequest) (Response, error) {
result = req.(*PrometheusRangeQueryRequest)
return nil, nil
})
@@ -136,7 +136,7 @@ func TestStepAlignMiddleware_MultipleUsers(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
var result *PrometheusRangeQueryRequest
- next := HandlerFunc(func(_ context.Context, req Request) (Response, error) {
+ next := HandlerFunc(func(_ context.Context, req MetricsQueryRequest) (Response, error) {
result = req.(*PrometheusRangeQueryRequest)
return nil, nil
})
@@ -154,7 +154,7 @@ func TestStepAlignMiddleware_MultipleUsers(t *testing.T) {
func TestIsRequestStepAligned(t *testing.T) {
tests := map[string]struct {
- req Request
+ req MetricsQueryRequest
expected bool
}{
"should return true if start and end are aligned to step": {
diff --git a/pkg/frontend/transport/handler_test.go b/pkg/frontend/transport/handler_test.go
index 2823d27229e..0a0e8ca2432 100644
--- a/pkg/frontend/transport/handler_test.go
+++ b/pkg/frontend/transport/handler_test.go
@@ -334,7 +334,7 @@ func TestHandler_Stop(t *testing.T) {
)
inProgress := make(chan int32)
var reqID atomic.Int32
- roundTripper := roundTripperFunc(func(req *http.Request) (*http.Response, error) {
+ roundTripper := roundTripperFunc(func(*http.Request) (*http.Response, error) {
id := reqID.Inc()
t.Logf("request %d sending its ID", id)
inProgress <- id
@@ -464,7 +464,7 @@ func TestHandler_LogsFormattedQueryDetails(t *testing.T) {
// the details aren't set by the query stats middleware if the request isn't a query
name: "not a query request",
requestFormFields: []string{},
- setQueryDetails: func(d *querymiddleware.QueryDetails) {},
+ setQueryDetails: func(*querymiddleware.QueryDetails) {},
expectedLoggedFields: map[string]string{},
expectedApproximateDurations: map[string]time.Duration{},
expectedMissingFields: []string{"length", "param_time", "time_since_param_start", "time_since_param_end"},
diff --git a/pkg/frontend/v1/frontend_test.go b/pkg/frontend/v1/frontend_test.go
index b380dc67a00..d4ade4357cd 100644
--- a/pkg/frontend/v1/frontend_test.go
+++ b/pkg/frontend/v1/frontend_test.go
@@ -47,7 +47,7 @@ const (
)
func TestFrontend(t *testing.T) {
- handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
_, err := w.Write([]byte("Hello World"))
require.NoError(t, err)
})
@@ -161,7 +161,7 @@ func TestFrontendCheckReady(t *testing.T) {
// the underlying query is correctly cancelled _and not retried_.
func TestFrontendCancel(t *testing.T) {
var tries atomic.Int32
- handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ handler := http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
<-r.Context().Done()
tries.Inc()
})
@@ -189,7 +189,7 @@ func TestFrontendCancel(t *testing.T) {
}
func TestFrontendMetricsCleanup(t *testing.T) {
- handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
_, err := w.Write([]byte("Hello World"))
require.NoError(t, err)
})
@@ -238,7 +238,7 @@ func TestFrontendStats(t *testing.T) {
tl := testLogger{}
- test := func(addr string, fr *Frontend) {
+ test := func(addr string, _ *Frontend) {
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/", addr), nil)
require.NoError(t, err)
err = user.InjectOrgIDIntoHTTPRequest(user.InjectOrgID(context.Background(), "1"), req)
diff --git a/pkg/frontend/v2/frontend_scheduler_adapter.go b/pkg/frontend/v2/frontend_scheduler_adapter.go
index fb12419a6dc..48fe23736cf 100644
--- a/pkg/frontend/v2/frontend_scheduler_adapter.go
+++ b/pkg/frontend/v2/frontend_scheduler_adapter.go
@@ -11,9 +11,11 @@ import (
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/tenant"
+ apierror "github.com/grafana/mimir/pkg/api/error"
"github.com/grafana/mimir/pkg/frontend/querymiddleware"
"github.com/grafana/mimir/pkg/querier"
"github.com/grafana/mimir/pkg/scheduler/schedulerpb"
+ "github.com/grafana/mimir/pkg/util"
"github.com/grafana/mimir/pkg/util/validation"
)
@@ -65,21 +67,26 @@ func (a *frontendToSchedulerAdapter) extractAdditionalQueueDimensions(
return nil, err
}
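+	// Parse the form once, without consuming the request body, so the query time parameters can be decoded below.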
+ reqValues, err := util.ParseRequestFormWithoutConsumingBody(httpRequest)
+ if err != nil {
+ return nil, apierror.New(apierror.TypeBadData, err.Error())
+ }
+
switch {
case querymiddleware.IsRangeQuery(httpRequest.URL.Path):
- start, end, _, err := querymiddleware.DecodeRangeQueryTimeParams(httpRequest)
+ start, end, _, err := querymiddleware.DecodeRangeQueryTimeParams(&reqValues)
if err != nil {
return nil, err
}
return a.queryComponentQueueDimensionFromTimeParams(tenantIDs, start, end, now), nil
case querymiddleware.IsInstantQuery(httpRequest.URL.Path):
- time, err := querymiddleware.DecodeInstantQueryTimeParams(httpRequest, time.Now)
+ time, err := querymiddleware.DecodeInstantQueryTimeParams(&reqValues, time.Now)
if err != nil {
return nil, err
}
return a.queryComponentQueueDimensionFromTimeParams(tenantIDs, time, time, now), nil
case querymiddleware.IsLabelsQuery(httpRequest.URL.Path):
- start, end, err := querymiddleware.DecodeLabelsQueryTimeParams(httpRequest)
+ start, end, err := querymiddleware.DecodeLabelsQueryTimeParams(&reqValues, true)
if err != nil {
return nil, err
}
diff --git a/pkg/frontend/v2/frontend_test.go b/pkg/frontend/v2/frontend_test.go
index 10de96a47af..238306893b2 100644
--- a/pkg/frontend/v2/frontend_test.go
+++ b/pkg/frontend/v2/frontend_test.go
@@ -227,7 +227,7 @@ func TestFrontendRetryEnqueue(t *testing.T) {
}
func TestFrontendTooManyRequests(t *testing.T) {
- f, _ := setupFrontend(t, nil, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
+ f, _ := setupFrontend(t, nil, func(*Frontend, *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
return &schedulerpb.SchedulerToFrontend{Status: schedulerpb.TOO_MANY_REQUESTS_PER_TENANT}
})
@@ -240,7 +240,7 @@ func TestFrontendTooManyRequests(t *testing.T) {
}
func TestFrontendEnqueueFailure(t *testing.T) {
- f, _ := setupFrontend(t, nil, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
+ f, _ := setupFrontend(t, nil, func(*Frontend, *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
return &schedulerpb.SchedulerToFrontend{Status: schedulerpb.SHUTTING_DOWN}
})
@@ -630,7 +630,7 @@ func TestConfig_Validate(t *testing.T) {
expectedErr string
}{
"should pass with default config": {
- setup: func(cfg *Config) {},
+ setup: func(*Config) {},
},
"should pass if scheduler address is configured, and query-scheduler discovery mode is the default one": {
setup: func(cfg *Config) {
@@ -668,7 +668,7 @@ func TestWithClosingGrpcServer(t *testing.T) {
const frontendConcurrency = 1
const userID = "test"
- f, _ := setupFrontendWithConcurrencyAndServerOptions(t, nil, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
+ f, _ := setupFrontendWithConcurrencyAndServerOptions(t, nil, func(*Frontend, *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend {
return &schedulerpb.SchedulerToFrontend{Status: schedulerpb.TOO_MANY_REQUESTS_PER_TENANT}
}, frontendConcurrency, grpc.KeepaliveParams(keepalive.ServerParameters{
MaxConnectionIdle: 100 * time.Millisecond,
diff --git a/pkg/ingester/active_series.go b/pkg/ingester/active_series.go
index f13f296b3cf..26218f11761 100644
--- a/pkg/ingester/active_series.go
+++ b/pkg/ingester/active_series.go
@@ -24,7 +24,7 @@ const activeSeriesMaxSizeBytes = 1 * 1024 * 1024
// series that match the given matchers.
func (i *Ingester) ActiveSeries(request *client.ActiveSeriesRequest, stream client.Ingester_ActiveSeriesServer) (err error) {
defer func() { err = i.mapReadErrorToErrorWithStatus(err) }()
- if err := i.checkAvailable(); err != nil {
+ if err := i.checkAvailableForRead(); err != nil {
return err
}
if err := i.checkReadOverloaded(); err != nil {
diff --git a/pkg/ingester/client/buffering_client_test.go b/pkg/ingester/client/buffering_client_test.go
index 0878f15bd04..743065bfc17 100644
--- a/pkg/ingester/client/buffering_client_test.go
+++ b/pkg/ingester/client/buffering_client_test.go
@@ -157,7 +157,7 @@ func TestWriteRequestBufferingClient_Push_WithMultipleMarshalCalls(t *testing.T)
func BenchmarkWriteRequestBufferingClient_Push(b *testing.B) {
bufferingClient := newBufferPoolingIngesterClient(&dummyIngesterClient{}, nil)
- bufferingClient.pushRawFn = func(ctx context.Context, conn *grpc.ClientConn, msg interface{}, opts ...grpc.CallOption) (*mimirpb.WriteResponse, error) {
+ bufferingClient.pushRawFn = func(_ context.Context, _ *grpc.ClientConn, msg interface{}, _ ...grpc.CallOption) (*mimirpb.WriteResponse, error) {
_, err := msg.(proto.Marshaler).Marshal()
return nil, err
}
diff --git a/pkg/ingester/client/circuitbreaker.go b/pkg/ingester/client/circuitbreaker.go
index 7a418f06beb..31ad338b40e 100644
--- a/pkg/ingester/client/circuitbreaker.go
+++ b/pkg/ingester/client/circuitbreaker.go
@@ -70,10 +70,10 @@ func NewCircuitBreaker(inst ring.InstanceDesc, cfg CircuitBreakerConfig, metrics
breaker := circuitbreaker.Builder[any]().
WithFailureRateThreshold(cfg.FailureThreshold, cfg.FailureExecutionThreshold, cfg.ThresholdingPeriod).
WithDelay(cfg.CooldownPeriod).
- OnFailure(func(event failsafe.ExecutionEvent[any]) {
+ OnFailure(func(failsafe.ExecutionEvent[any]) {
countError.Inc()
}).
- OnSuccess(func(event failsafe.ExecutionEvent[any]) {
+ OnSuccess(func(failsafe.ExecutionEvent[any]) {
countSuccess.Inc()
}).
OnClose(func(event circuitbreaker.StateChangedEvent) {
@@ -88,7 +88,7 @@ func NewCircuitBreaker(inst ring.InstanceDesc, cfg CircuitBreakerConfig, metrics
transitionHalfOpen.Inc()
level.Info(logger).Log("msg", "circuit breaker is half-open", "ingester", inst.Id, "previous", event.OldState, "current", event.NewState)
}).
- HandleIf(func(r any, err error) bool { return isFailure(err) }).
+ HandleIf(func(_ any, err error) bool { return isFailure(err) }).
Build()
executor := failsafe.NewExecutor[any](breaker)
diff --git a/pkg/ingester/client/circuitbreaker_test.go b/pkg/ingester/client/circuitbreaker_test.go
index cee718073dd..747d7877062 100644
--- a/pkg/ingester/client/circuitbreaker_test.go
+++ b/pkg/ingester/client/circuitbreaker_test.go
@@ -76,12 +76,12 @@ func perInstanceLimitError(t *testing.T) error {
func TestNewCircuitBreaker(t *testing.T) {
// gRPC invoker that does not return an error
- success := func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
+ success := func(context.Context, string, interface{}, interface{}, *grpc.ClientConn, ...grpc.CallOption) error {
return nil
}
// gRPC invoker that returns an error that will be treated as an error by the circuit breaker
- failure := func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
+ failure := func(context.Context, string, interface{}, interface{}, *grpc.ClientConn, ...grpc.CallOption) error {
return perInstanceLimitError(t)
}
diff --git a/pkg/ingester/errors.go b/pkg/ingester/errors.go
index 3e4534da1a3..be209398e4f 100644
--- a/pkg/ingester/errors.go
+++ b/pkg/ingester/errors.go
@@ -455,6 +455,43 @@ var _ ingesterError = perMetricMetadataLimitReachedError{}
// Ensure that perMetricMetadataLimitReachedError is an softError.
var _ softError = perMetricMetadataLimitReachedError{}
+// nativeHistogramValidationError indicates that a native histogram failed validation (for example, its bucket counts did not add up to the overall count).
+type nativeHistogramValidationError struct {
+ id globalerror.ID
+ originalErr error
+ seriesLabels []mimirpb.LabelAdapter
+ timestamp model.Time
+}
+
+func newNativeHistogramValidationError(id globalerror.ID, originalErr error, timestamp model.Time, seriesLabels []mimirpb.LabelAdapter) nativeHistogramValidationError {
+ return nativeHistogramValidationError{
+ id: id,
+ originalErr: originalErr,
+ seriesLabels: seriesLabels,
+ timestamp: timestamp,
+ }
+}
+
+func (e nativeHistogramValidationError) Error() string {
+ return e.id.Message(fmt.Sprintf("err: %v. timestamp=%s, series=%s",
+ e.originalErr,
+ e.timestamp.Time().UTC().Format(time.RFC3339Nano),
+ e.seriesLabels,
+ ))
+}
+
+func (e nativeHistogramValidationError) errorCause() mimirpb.ErrorCause {
+ return mimirpb.BAD_DATA
+}
+
+func (e nativeHistogramValidationError) soft() {}
+
+// Ensure that nativeHistogramValidationError is an ingesterError.
+var _ ingesterError = nativeHistogramValidationError{}
+
+// Ensure that nativeHistogramValidationError is a softError.
+var _ softError = nativeHistogramValidationError{}
+
// unavailableError is an ingesterError indicating that the ingester is unavailable.
type unavailableError struct {
state services.State
@@ -550,6 +587,7 @@ type ingesterErrSamplers struct {
maxMetadataPerMetricLimitExceeded *log.Sampler
maxSeriesPerUserLimitExceeded *log.Sampler
maxMetadataPerUserLimitExceeded *log.Sampler
+ nativeHistogramValidationError *log.Sampler
}
func newIngesterErrSamplers(freq int64) ingesterErrSamplers {
@@ -563,6 +601,7 @@ func newIngesterErrSamplers(freq int64) ingesterErrSamplers {
log.NewSampler(freq),
log.NewSampler(freq),
log.NewSampler(freq),
+ log.NewSampler(freq),
}
}
diff --git a/pkg/ingester/errors_test.go b/pkg/ingester/errors_test.go
index b276cbe06ba..71eef92d419 100644
--- a/pkg/ingester/errors_test.go
+++ b/pkg/ingester/errors_test.go
@@ -314,7 +314,9 @@ func TestErrorWithStatus(t *testing.T) {
if data.doNotLog {
var optional middleware.OptionalLogging
require.ErrorAs(t, errWithStatus, &optional)
- require.False(t, optional.ShouldLog(context.Background(), 0))
+
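+			// Only the first return value matters for this assertion; the second is intentionally ignored.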
+ shouldLog, _ := optional.ShouldLog(context.Background())
+ require.False(t, shouldLog)
}
})
}
@@ -548,7 +550,9 @@ func TestMapPushErrorToErrorWithStatus(t *testing.T) {
if tc.doNotLogExpected {
var doNotLogError middleware.DoNotLogError
require.ErrorAs(t, handledErr, &doNotLogError)
- require.False(t, doNotLogError.ShouldLog(context.Background(), 0))
+
+ shouldLog, _ := doNotLogError.ShouldLog(context.Background())
+ require.False(t, shouldLog)
}
})
}
@@ -723,7 +727,9 @@ func TestMapPushErrorToErrorWithHTTPOrGRPCStatus(t *testing.T) {
if tc.doNotLogExpected {
var doNotLogError middleware.DoNotLogError
require.ErrorAs(t, handledErr, &doNotLogError)
- require.False(t, doNotLogError.ShouldLog(context.Background(), 0))
+
+ shouldLog, _ := doNotLogError.ShouldLog(context.Background())
+ require.False(t, shouldLog)
}
})
}
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index ada9bb0c6ff..9c29a3a4f44 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -98,13 +98,14 @@ const (
instanceIngestionRateTickInterval = time.Second
// Reasons for discarding samples
- reasonSampleOutOfOrder = "sample-out-of-order"
- reasonSampleTooOld = "sample-too-old"
- reasonSampleTooFarInFuture = "sample-too-far-in-future"
- reasonNewValueForTimestamp = "new-value-for-timestamp"
- reasonSampleOutOfBounds = "sample-out-of-bounds"
- reasonPerUserSeriesLimit = "per_user_series_limit"
- reasonPerMetricSeriesLimit = "per_metric_series_limit"
+ reasonSampleOutOfOrder = "sample-out-of-order"
+ reasonSampleTooOld = "sample-too-old"
+ reasonSampleTooFarInFuture = "sample-too-far-in-future"
+ reasonNewValueForTimestamp = "new-value-for-timestamp"
+ reasonSampleOutOfBounds = "sample-out-of-bounds"
+ reasonPerUserSeriesLimit = "per_user_series_limit"
+ reasonPerMetricSeriesLimit = "per_metric_series_limit"
+ reasonInvalidNativeHistogram = "invalid-native-histogram"
replicationFactorStatsName = "ingester_replication_factor"
ringStoreStatsName = "ingester_ring_store"
@@ -211,6 +212,9 @@ type Config struct {
// This config is dynamically injected because defined outside the ingester config.
IngestStorageConfig ingest.Config `yaml:"-"`
+
+ // This config can be overridden in tests.
+ limitMetricsUpdatePeriod time.Duration `yaml:"-"`
}
// RegisterFlags adds the flags required to config this to the given FlagSet
@@ -240,6 +244,9 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet, logger log.Logger) {
// the default behaviour of Mimir should be as this flag were set to true.
// TODO: Remove in Mimir 2.14.0
f.BoolVar(&cfg.DeprecatedReturnOnlyGRPCErrors, deprecatedReturnOnlyGRPCErrorsFlag, true, "When enabled only gRPC errors will be returned by the ingester.")
+
+ // Hardcoded config (can only be overridden in tests).
+ cfg.limitMetricsUpdatePeriod = time.Second * 15
}
func (cfg *Config) Validate(logger log.Logger) error {
@@ -285,11 +292,14 @@ type Ingester struct {
metrics *ingesterMetrics
logger log.Logger
- lifecycler *ring.Lifecycler
- limits *validation.Overrides
- limiter *Limiter
- subservicesWatcher *services.FailureWatcher
- ownedSeriesService *ownedSeriesService
+ lifecycler *ring.Lifecycler
+ limits *validation.Overrides
+ limiter *Limiter
+ subservicesWatcher *services.FailureWatcher
+ ownedSeriesService *ownedSeriesService
+ compactionService services.Service
+ metricsUpdaterService services.Service
+ metadataPurgerService services.Service
// Mimir blocks storage.
tsdbsMtx sync.RWMutex
@@ -303,7 +313,9 @@ type Ingester struct {
// Metrics shared across all per-tenant shippers.
shipperMetrics *shipperMetrics
- subservices *services.Manager
+ subservicesForPartitionReplay *services.Manager
+ subservicesAfterIngesterRingLifecycler *services.Manager
+
activeGroups *util.ActiveGroupsCleanupService
tsdbMetrics *tsdbMetrics
@@ -469,7 +481,22 @@ func New(cfg Config, limits *validation.Overrides, ingestersRing ring.ReadRing,
i.subservicesWatcher.WatchService(i.ownedSeriesService)
}
- i.BasicService = services.NewBasicService(i.starting, i.updateLoop, i.stopping)
+	// Init compaction service, responsible for periodically running TSDB head compactions.
+ i.compactionService = services.NewBasicService(nil, i.compactionServiceRunning, nil)
+ i.subservicesWatcher.WatchService(i.compactionService)
+
+	// Init metrics updater service, responsible for periodically updating ingester metrics and stats.
+ i.metricsUpdaterService = services.NewBasicService(nil, i.metricsUpdaterServiceRunning, nil)
+ i.subservicesWatcher.WatchService(i.metricsUpdaterService)
+
+	// Init metadata purger service, responsible for periodically deleting metrics metadata past its retention period.
+ i.metadataPurgerService = services.NewTimerService(metadataPurgePeriod, nil, func(context.Context) error {
+ i.purgeUserMetricsMetadata()
+ return nil
+ }, nil)
+ i.subservicesWatcher.WatchService(i.metadataPurgerService)
+
+ i.BasicService = services.NewBasicService(i.starting, i.ingesterRunning, i.stopping)
return i, nil
}
@@ -546,6 +573,22 @@ func (i *Ingester) starting(ctx context.Context) (err error) {
}
}
+ // Start the following services before starting the ingest storage reader, in order to have them
+ // running while replaying the partition (if ingest storage is enabled).
+ i.subservicesForPartitionReplay, err = createManagerThenStartAndAwaitHealthy(ctx, i.compactionService, i.metricsUpdaterService, i.metadataPurgerService)
+ if err != nil {
+ return errors.Wrap(err, "failed to start ingester subservices before partition reader")
+ }
+
+	// When ingest storage is enabled, we have to make sure that the reader catches up replaying the partition
+	// BEFORE the ingester ring lifecycler is started, because once the ingester ring lifecycler starts,
+	// it switches the ingester state in the ring to ACTIVE.
+ if i.ingestReader != nil {
+ if err := services.StartAndAwaitRunning(ctx, i.ingestReader); err != nil {
+ return errors.Wrap(err, "failed to start partition reader")
+ }
+ }
+
// Important: we want to keep lifecycler running until we ask it to stop, so we need to give it independent context
if err := i.lifecycler.StartAsync(context.Background()); err != nil {
return errors.Wrap(err, "failed to start lifecycler")
@@ -554,11 +597,8 @@ func (i *Ingester) starting(ctx context.Context) (err error) {
return errors.Wrap(err, "failed to start lifecycler")
}
- // let's start the rest of subservices via manager
- servs := []services.Service(nil)
-
- compactionService := services.NewBasicService(nil, i.compactionLoop, nil)
- servs = append(servs, compactionService)
+ // Finally we start all services that should run after the ingester ring lifecycler.
+ var servs []services.Service
if i.cfg.BlocksStorageConfig.TSDB.IsBlocksShippingEnabled() {
shippingService := services.NewBasicService(nil, i.shipBlocksLoop, nil)
@@ -578,19 +618,18 @@ func (i *Ingester) starting(ctx context.Context) (err error) {
servs = append(servs, i.utilizationBasedLimiter)
}
- if i.ingestReader != nil {
- servs = append(servs, i.ingestReader)
- }
-
if i.ingestPartitionLifecycler != nil {
servs = append(servs, i.ingestPartitionLifecycler)
}
- i.subservices, err = services.NewManager(servs...)
- if err == nil {
- err = services.StartManagerAndAwaitHealthy(ctx, i.subservices)
+	// Since subservices are conditional, we add an idle service if there are no subservices, to
+	// guarantee there's at least 1 service to run; otherwise the service manager fails to start.
+ if len(servs) == 0 {
+ servs = append(servs, services.NewIdleService(nil, nil))
}
- return errors.Wrap(err, "failed to start ingester components")
+
+ i.subservicesAfterIngesterRingLifecycler, err = createManagerThenStartAndAwaitHealthy(ctx, servs...)
+ return errors.Wrap(err, "failed to start ingester subservices after ingester ring lifecycler")
}
func (i *Ingester) stoppingForFlusher(_ error) error {
@@ -601,6 +640,12 @@ func (i *Ingester) stoppingForFlusher(_ error) error {
}
func (i *Ingester) stopping(_ error) error {
+ if i.ingestReader != nil {
+ if err := services.StopAndAwaitTerminated(context.Background(), i.ingestReader); err != nil {
+ level.Warn(i.logger).Log("msg", "failed to stop partition reader", "err", err)
+ }
+ }
+
if i.ownedSeriesService != nil {
err := services.StopAndAwaitTerminated(context.Background(), i.ownedSeriesService)
if err != nil {
@@ -609,7 +654,15 @@ func (i *Ingester) stopping(_ error) error {
}
}
- if err := services.StopManagerAndAwaitStopped(context.Background(), i.subservices); err != nil {
+ // Stop subservices.
+ i.subservicesForPartitionReplay.StopAsync()
+ i.subservicesAfterIngesterRingLifecycler.StopAsync()
+
+ if err := i.subservicesForPartitionReplay.AwaitStopped(context.Background()); err != nil {
+ level.Warn(i.logger).Log("msg", "failed to stop ingester subservices", "err", err)
+ }
+
+ if err := i.subservicesAfterIngesterRingLifecycler.AwaitStopped(context.Background()); err != nil {
level.Warn(i.logger).Log("msg", "failed to stop ingester subservices", "err", err)
}
@@ -630,10 +683,26 @@ func (i *Ingester) stopping(_ error) error {
return nil
}
-func (i *Ingester) updateLoop(ctx context.Context) error {
+func (i *Ingester) ingesterRunning(ctx context.Context) error {
+ tsdbUpdateTicker := time.NewTicker(i.cfg.TSDBConfigUpdatePeriod)
+ defer tsdbUpdateTicker.Stop()
+ for {
+ select {
+ case <-tsdbUpdateTicker.C:
+ i.applyTSDBSettings()
+ case <-ctx.Done():
+ return nil
+ case err := <-i.subservicesWatcher.Chan():
+ return errors.Wrap(err, "ingester subservice failed")
+ }
+ }
+}
+
+// metricsUpdaterServiceRunning is the running function for the internal metrics updater service.
+func (i *Ingester) metricsUpdaterServiceRunning(ctx context.Context) error {
// Launch a dedicated goroutine for inflightRequestsTicker
- // to ensure it operates independently, unaffected by delays from other logics in updateLoop.
+	// to ensure it operates independently, unaffected by delays from other logic in this function.
go func() {
inflightRequestsTicker := time.NewTicker(250 * time.Millisecond)
defer inflightRequestsTicker.Stop()
@@ -654,9 +723,6 @@ func (i *Ingester) updateLoop(ctx context.Context) error {
ingestionRateTicker := time.NewTicker(instanceIngestionRateTickInterval)
defer ingestionRateTicker.Stop()
- tsdbUpdateTicker := time.NewTicker(i.cfg.TSDBConfigUpdatePeriod)
- defer tsdbUpdateTicker.Stop()
-
var activeSeriesTickerChan <-chan time.Time
if i.cfg.ActiveSeriesMetrics.Enabled {
t := time.NewTicker(i.cfg.ActiveSeriesMetrics.UpdatePeriod)
@@ -664,20 +730,14 @@ func (i *Ingester) updateLoop(ctx context.Context) error {
defer t.Stop()
}
- // Similarly to the above, this is a hardcoded value.
- metadataPurgeTicker := time.NewTicker(metadataPurgePeriod)
- defer metadataPurgeTicker.Stop()
-
usageStatsUpdateTicker := time.NewTicker(usageStatsUpdateInterval)
defer usageStatsUpdateTicker.Stop()
- limitMetricsUpdateTicker := time.NewTicker(time.Second * 15)
+ limitMetricsUpdateTicker := time.NewTicker(i.cfg.limitMetricsUpdatePeriod)
defer limitMetricsUpdateTicker.Stop()
for {
select {
- case <-metadataPurgeTicker.C:
- i.purgeUserMetricsMetadata()
case <-ingestionRateTicker.C:
i.ingestionRate.Tick()
case <-rateUpdateTicker.C:
@@ -687,18 +747,14 @@ func (i *Ingester) updateLoop(ctx context.Context) error {
db.ingestedRuleSamples.Tick()
}
i.tsdbsMtx.RUnlock()
- case <-tsdbUpdateTicker.C:
- i.applyTSDBSettings()
case <-activeSeriesTickerChan:
i.updateActiveSeries(time.Now())
case <-usageStatsUpdateTicker.C:
i.updateUsageStats()
case <-limitMetricsUpdateTicker.C:
- i.updateMetrics()
+ i.updateLimitMetrics()
case <-ctx.Done():
return nil
- case err := <-i.subservicesWatcher.Chan():
- return errors.Wrap(err, "ingester subservice failed")
}
}
}
@@ -849,7 +905,7 @@ func (i *Ingester) applyTSDBSettings() {
}
}
-func (i *Ingester) updateMetrics() {
+func (i *Ingester) updateLimitMetrics() {
for _, userID := range i.getTSDBUsers() {
db := i.getTSDB(userID)
if db == nil {
@@ -878,17 +934,18 @@ type extendedAppender interface {
}
type pushStats struct {
- succeededSamplesCount int
- failedSamplesCount int
- succeededExemplarsCount int
- failedExemplarsCount int
- sampleOutOfBoundsCount int
- sampleOutOfOrderCount int
- sampleTooOldCount int
- sampleTooFarInFutureCount int
- newValueForTimestampCount int
- perUserSeriesLimitCount int
- perMetricSeriesLimitCount int
+ succeededSamplesCount int
+ failedSamplesCount int
+ succeededExemplarsCount int
+ failedExemplarsCount int
+ sampleOutOfBoundsCount int
+ sampleOutOfOrderCount int
+ sampleTooOldCount int
+ sampleTooFarInFutureCount int
+ newValueForTimestampCount int
+ perUserSeriesLimitCount int
+ perMetricSeriesLimitCount int
+ invalidNativeHistogramCount int
}
type ctxKey int
@@ -921,7 +978,7 @@ func (i *Ingester) FinishPushRequest(ctx context.Context) {
//
// The shouldFinish flag tells if the caller must call finish on this request. If not, there is already someone in the call stack who will do that.
func (i *Ingester) startPushRequest(ctx context.Context, reqSize int64) (_ context.Context, shouldFinish bool, err error) {
- if err := i.checkAvailable(); err != nil {
+ if err := i.checkAvailableForPush(); err != nil {
return nil, false, err
}
@@ -1148,6 +1205,9 @@ func (i *Ingester) updateMetricsFromPushStats(userID string, group string, stats
if stats.perMetricSeriesLimitCount > 0 {
discarded.perMetricSeriesLimit.WithLabelValues(userID, group).Add(float64(stats.perMetricSeriesLimitCount))
}
+ if stats.invalidNativeHistogramCount > 0 {
+ discarded.invalidNativeHistogram.WithLabelValues(userID, group).Add(float64(stats.invalidNativeHistogramCount))
+ }
if stats.succeededSamplesCount > 0 {
i.ingestionRate.Add(int64(stats.succeededSamplesCount))
@@ -1223,6 +1283,38 @@ func (i *Ingester) pushSamplesToAppender(userID string, timeseries []mimirpb.Pre
return newPerMetricSeriesLimitReachedError(i.limiter.limits.MaxGlobalSeriesPerMetric(userID), labels)
})
return true
+
+ // Map TSDB native histogram validation errors to soft errors.
+ case errors.Is(err, histogram.ErrHistogramCountMismatch):
+ stats.invalidNativeHistogramCount++
+ updateFirstPartial(i.errorSamplers.nativeHistogramValidationError, func() softError {
+ return newNativeHistogramValidationError(globalerror.NativeHistogramCountMismatch, err, model.Time(timestamp), labels)
+ })
+ return true
+ case errors.Is(err, histogram.ErrHistogramCountNotBigEnough):
+ stats.invalidNativeHistogramCount++
+ updateFirstPartial(i.errorSamplers.nativeHistogramValidationError, func() softError {
+ return newNativeHistogramValidationError(globalerror.NativeHistogramCountNotBigEnough, err, model.Time(timestamp), labels)
+ })
+ return true
+ case errors.Is(err, histogram.ErrHistogramNegativeBucketCount):
+ stats.invalidNativeHistogramCount++
+ updateFirstPartial(i.errorSamplers.nativeHistogramValidationError, func() softError {
+ return newNativeHistogramValidationError(globalerror.NativeHistogramNegativeBucketCount, err, model.Time(timestamp), labels)
+ })
+ return true
+ case errors.Is(err, histogram.ErrHistogramSpanNegativeOffset):
+ stats.invalidNativeHistogramCount++
+ updateFirstPartial(i.errorSamplers.nativeHistogramValidationError, func() softError {
+ return newNativeHistogramValidationError(globalerror.NativeHistogramSpanNegativeOffset, err, model.Time(timestamp), labels)
+ })
+ return true
+ case errors.Is(err, histogram.ErrHistogramSpansBucketsMismatch):
+ stats.invalidNativeHistogramCount++
+ updateFirstPartial(i.errorSamplers.nativeHistogramValidationError, func() softError {
+ return newNativeHistogramValidationError(globalerror.NativeHistogramSpansBucketsMismatch, err, model.Time(timestamp), labels)
+ })
+ return true
}
return false
}
@@ -1453,7 +1545,7 @@ func (i *Ingester) pushSamplesToAppender(userID string, timeseries []mimirpb.Pre
func (i *Ingester) QueryExemplars(ctx context.Context, req *client.ExemplarQueryRequest) (resp *client.ExemplarQueryResponse, err error) {
defer func() { err = i.mapReadErrorToErrorWithStatus(err) }()
- if err := i.checkAvailable(); err != nil {
+ if err := i.checkAvailableForRead(); err != nil {
return nil, err
}
if err := i.checkReadOverloaded(); err != nil {
@@ -1517,7 +1609,7 @@ func (i *Ingester) QueryExemplars(ctx context.Context, req *client.ExemplarQuery
func (i *Ingester) LabelValues(ctx context.Context, req *client.LabelValuesRequest) (resp *client.LabelValuesResponse, err error) {
defer func() { err = i.mapReadErrorToErrorWithStatus(err) }()
- if err := i.checkAvailable(); err != nil {
+ if err := i.checkAvailableForRead(); err != nil {
return nil, err
}
if err := i.checkReadOverloaded(); err != nil {
@@ -1563,7 +1655,7 @@ func (i *Ingester) LabelValues(ctx context.Context, req *client.LabelValuesReque
func (i *Ingester) LabelNames(ctx context.Context, req *client.LabelNamesRequest) (resp *client.LabelNamesResponse, err error) {
defer func() { err = i.mapReadErrorToErrorWithStatus(err) }()
- if err := i.checkAvailable(); err != nil {
+ if err := i.checkAvailableForRead(); err != nil {
return nil, err
}
if err := i.checkReadOverloaded(); err != nil {
@@ -1610,7 +1702,7 @@ func (i *Ingester) LabelNames(ctx context.Context, req *client.LabelNamesRequest
// MetricsForLabelMatchers implements IngesterServer.
func (i *Ingester) MetricsForLabelMatchers(ctx context.Context, req *client.MetricsForLabelMatchersRequest) (resp *client.MetricsForLabelMatchersResponse, err error) {
defer func() { err = i.mapReadErrorToErrorWithStatus(err) }()
- if err := i.checkAvailable(); err != nil {
+ if err := i.checkAvailableForRead(); err != nil {
return nil, err
}
if err := i.checkReadOverloaded(); err != nil {
@@ -1687,7 +1779,7 @@ func (i *Ingester) MetricsForLabelMatchers(ctx context.Context, req *client.Metr
func (i *Ingester) UserStats(ctx context.Context, req *client.UserStatsRequest) (resp *client.UserStatsResponse, err error) {
defer func() { err = i.mapReadErrorToErrorWithStatus(err) }()
- if err := i.checkAvailable(); err != nil {
+ if err := i.checkAvailableForRead(); err != nil {
return nil, err
}
if err := i.checkReadOverloaded(); err != nil {
@@ -1719,7 +1811,7 @@ func (i *Ingester) UserStats(ctx context.Context, req *client.UserStatsRequest)
// because the purpose of this function is to show a snapshot of the live ingester's state.
func (i *Ingester) AllUserStats(_ context.Context, req *client.UserStatsRequest) (resp *client.UsersStatsResponse, err error) {
defer func() { err = i.mapReadErrorToErrorWithStatus(err) }()
- if err := i.checkAvailable(); err != nil {
+ if err := i.checkAvailableForRead(); err != nil {
return nil, err
}
@@ -1750,7 +1842,7 @@ const labelNamesAndValuesTargetSizeBytes = 1 * 1024 * 1024
func (i *Ingester) LabelNamesAndValues(request *client.LabelNamesAndValuesRequest, stream client.Ingester_LabelNamesAndValuesServer) (err error) {
defer func() { err = i.mapReadErrorToErrorWithStatus(err) }()
- if err := i.checkAvailable(); err != nil {
+ if err := i.checkAvailableForRead(); err != nil {
return err
}
if err := i.checkReadOverloaded(); err != nil {
@@ -1785,7 +1877,7 @@ func (i *Ingester) LabelNamesAndValues(request *client.LabelNamesAndValuesReques
var valueFilter func(name, value string) (bool, error)
switch request.GetCountMethod() {
case client.IN_MEMORY:
- valueFilter = func(name, value string) (bool, error) {
+ valueFilter = func(string, string) (bool, error) {
return true, nil
}
case client.ACTIVE:
@@ -1805,7 +1897,7 @@ const labelValuesCardinalityTargetSizeBytes = 1 * 1024 * 1024
func (i *Ingester) LabelValuesCardinality(req *client.LabelValuesCardinalityRequest, srv client.Ingester_LabelValuesCardinalityServer) (err error) {
defer func() { err = i.mapReadErrorToErrorWithStatus(err) }()
- if err := i.checkAvailable(); err != nil {
+ if err := i.checkAvailableForRead(); err != nil {
return err
}
if err := i.checkReadOverloaded(); err != nil {
@@ -1892,7 +1984,7 @@ const queryStreamBatchMessageSize = 1 * 1024 * 1024
// QueryStream streams metrics from a TSDB. This implements the client.IngesterServer interface
func (i *Ingester) QueryStream(req *client.QueryRequest, stream client.Ingester_QueryStreamServer) (err error) {
defer func() { err = i.mapReadErrorToErrorWithStatus(err) }()
- if err := i.checkAvailable(); err != nil {
+ if err := i.checkAvailableForRead(); err != nil {
return err
}
if err := i.checkReadOverloaded(); err != nil {
@@ -2893,33 +2985,38 @@ func (i *Ingester) shipBlocks(ctx context.Context, allowed *util.AllowedTenants)
})
}
-func (i *Ingester) compactionLoop(ctx context.Context) error {
+// compactionServiceRunning is the running function of the internal service responsible for periodically
+// compacting the TSDB Head.
+func (i *Ingester) compactionServiceRunning(ctx context.Context) error {
// At ingester startup, spread the first compaction over the configured compaction
// interval. Then, the next compactions will happen at a regular interval. This logic
// helps to have different ingesters running the compaction at a different time,
// effectively spreading the compactions over the configured interval.
- firstHeadCompactionInterval := i.cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval
- if i.cfg.BlocksStorageConfig.TSDB.HeadCompactionIntervalJitterEnabled {
- firstHeadCompactionInterval = util.DurationWithNegativeJitter(i.cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval, 1)
- }
-
- ticker := time.NewTicker(firstHeadCompactionInterval)
- defer ticker.Stop()
- tickerRunOnce := false
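+	// Use a variable ticker so the first tick can fire after a different (possibly jittered or shortened) interval than the steady-state one.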
+ firstInterval, standardInterval := i.compactionServiceInterval()
+ stopTicker, tickerChan := util.NewVariableTicker(firstInterval, standardInterval)
+ defer func() {
+ // We call stopTicker() from an anonymous function because the stopTicker()
+ // reference may change during the lifetime of compactionServiceRunning().
+ stopTicker()
+ }()
for ctx.Err() == nil {
select {
- case <-ticker.C:
+ case <-tickerChan:
// The forcedCompactionMaxTime has no meaning because force=false.
i.compactBlocks(ctx, false, 0, nil)
// Check if any TSDB Head should be compacted to reduce the number of in-memory series.
i.compactBlocksToReduceInMemorySeries(ctx, time.Now())
- // Run it at a regular (configured) interval after the first compaction.
- if !tickerRunOnce {
- ticker.Reset(i.cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval)
- tickerRunOnce = true
+			// Check if the desired interval has changed. We only compare the standard interval
+			// because the first interval may be random due to jittering.
+ if newFirstInterval, newStandardInterval := i.compactionServiceInterval(); standardInterval != newStandardInterval {
+ // Stop the previous ticker before creating a new one.
+ stopTicker()
+
+ standardInterval = newStandardInterval
+ stopTicker, tickerChan = util.NewVariableTicker(newFirstInterval, newStandardInterval)
}
case req := <-i.forceCompactTrigger:
@@ -2934,9 +3031,30 @@ func (i *Ingester) compactionLoop(ctx context.Context) error {
return nil
}
+// compactionServiceInterval returns how frequently the TSDB Head should be checked for compaction.
+// The returned standardInterval is guaranteed to have no jittering applied.
+// The returned intervals may change over time, depending on the ingester service state.
+func (i *Ingester) compactionServiceInterval() (firstInterval, standardInterval time.Duration) {
+ if i.State() == services.Starting {
+ // Trigger TSDB Head compaction frequently when starting up, because we may replay data from the partition
+ // if ingest storage is enabled.
+ standardInterval = min(i.cfg.BlocksStorageConfig.TSDB.HeadCompactionIntervalWhileStarting, i.cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval)
+ } else {
+ standardInterval = i.cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval
+ }
+
+ if i.cfg.BlocksStorageConfig.TSDB.HeadCompactionIntervalJitterEnabled {
+ firstInterval = util.DurationWithNegativeJitter(standardInterval, 1)
+ } else {
+ firstInterval = standardInterval
+ }
+
+ return
+}
+
// Compacts all compactable blocks. Force flag will force compaction even if head is not compactable yet.
func (i *Ingester) compactBlocks(ctx context.Context, force bool, forcedCompactionMaxTime int64, allowed *util.AllowedTenants) {
- _ = concurrency.ForEachUser(ctx, i.getTSDBUsers(), i.cfg.BlocksStorageConfig.TSDB.HeadCompactionConcurrency, func(ctx context.Context, userID string) error {
+ _ = concurrency.ForEachUser(ctx, i.getTSDBUsers(), i.cfg.BlocksStorageConfig.TSDB.HeadCompactionConcurrency, func(_ context.Context, userID string) error {
if !allowed.IsAllowed(userID) {
return nil
}
@@ -3234,6 +3352,16 @@ const (
// Blocks version of Flush handler. It force-compacts blocks, and triggers shipping.
func (i *Ingester) FlushHandler(w http.ResponseWriter, r *http.Request) {
+ // Don't allow callers to flush TSDB while we're in the middle of starting or shutting down.
+ if ingesterState := i.State(); ingesterState != services.Running {
+ err := newUnavailableError(ingesterState)
+ level.Warn(i.logger).Log("msg", "flushing TSDB blocks is not allowed", "err", err)
+
+ w.WriteHeader(http.StatusServiceUnavailable)
+ _, _ = w.Write([]byte(err.Error()))
+ return
+ }
+
err := r.ParseForm()
if err != nil {
level.Warn(i.logger).Log("msg", "failed to parse HTTP request in flush handler", "err", err)
@@ -3531,19 +3659,42 @@ func (i *Ingester) ShutdownHandler(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusNoContent)
}
-// checkAvailable checks whether the ingester is available, and if it is not the case
-// returns an unavailableError error.
-// Using block store, the ingester is available only when it is in a Running state.
-// The ingester is not available when stopping to prevent any read or writes to the
-// TSDB after the ingester has closed them.
-func (i *Ingester) checkAvailable() error {
+// checkAvailableForRead checks whether the ingester is available for read requests,
+// and if it is not the case returns an unavailableError error.
+func (i *Ingester) checkAvailableForRead() error {
s := i.State()
+
+	// The ingester is not available when starting / stopping, to prevent any reads to
+	// the TSDB when it's closed.
if s == services.Running {
return nil
}
return newUnavailableError(s)
}
+// checkAvailableForPush checks whether the ingester is available for push requests,
+// and if it is not the case returns an unavailableError error.
+func (i *Ingester) checkAvailableForPush() error {
+ ingesterState := i.State()
+
+	// The ingester is not available when stopping, to prevent any pushes to
+	// the TSDB when it's closed.
+ if ingesterState == services.Running {
+ return nil
+ }
+
+	// If ingest storage is enabled, we also allow push requests when the ingester is starting,
+	// as long as the ingest reader is either in starting or running state. This is required to
+	// let the ingest reader push data while replaying a partition at ingester startup.
+ if ingesterState == services.Starting && i.ingestReader != nil {
+ if readerState := i.ingestReader.State(); readerState == services.Starting || readerState == services.Running {
+ return nil
+ }
+ }
+
+ return newUnavailableError(ingesterState)
+}
+
// PushToStorage implements ingest.Pusher interface for ingestion via ingest-storage.
func (i *Ingester) PushToStorage(ctx context.Context, req *mimirpb.WriteRequest) error {
err := i.PushWithCleanup(ctx, req, func() { mimirpb.ReuseSlice(req.Timeseries) })
@@ -3689,7 +3840,7 @@ func (i *Ingester) purgeUserMetricsMetadata() {
// MetricsMetadata returns all the metrics metadata of a user.
func (i *Ingester) MetricsMetadata(ctx context.Context, req *client.MetricsMetadataRequest) (resp *client.MetricsMetadataResponse, err error) {
defer func() { err = i.mapReadErrorToErrorWithStatus(err) }()
- if err := i.checkAvailable(); err != nil {
+ if err := i.checkAvailableForRead(); err != nil {
return nil, err
}
@@ -3710,8 +3861,11 @@ func (i *Ingester) MetricsMetadata(ctx context.Context, req *client.MetricsMetad
// CheckReady is the readiness handler used to indicate to k8s when the ingesters
// are ready for the addition or removal of another ingester.
func (i *Ingester) CheckReady(ctx context.Context) error {
- if err := i.checkAvailable(); err != nil {
- return fmt.Errorf("ingester not ready: %v", err)
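+	// The ingester is ready only when both the push and read paths are available.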
+ if err := i.checkAvailableForPush(); err != nil {
+ return fmt.Errorf("ingester not ready for pushes: %v", err)
+ }
+ if err := i.checkAvailableForRead(); err != nil {
+ return fmt.Errorf("ingester not ready for reads: %v", err)
}
return i.lifecycler.CheckReady(ctx)
}
@@ -3820,3 +3974,16 @@ func (i *Ingester) enforceReadConsistency(ctx context.Context, tenantID string)
return errors.Wrap(i.ingestReader.WaitReadConsistency(ctx), "wait for read consistency")
}
+
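+// createManagerThenStartAndAwaitHealthy creates a services.Manager for the given services, starts it,
+// and waits until all services are healthy.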
+func createManagerThenStartAndAwaitHealthy(ctx context.Context, srvs ...services.Service) (*services.Manager, error) {
+ manager, err := services.NewManager(srvs...)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := services.StartManagerAndAwaitHealthy(ctx, manager); err != nil {
+ return nil, err
+ }
+
+ return manager, nil
+}
diff --git a/pkg/ingester/ingester_ingest_storage_test.go b/pkg/ingester/ingester_ingest_storage_test.go
index a7824fac49c..8c55a6e9d21 100644
--- a/pkg/ingester/ingester_ingest_storage_test.go
+++ b/pkg/ingester/ingester_ingest_storage_test.go
@@ -12,6 +12,7 @@ import (
"net/http/httptest"
"os"
"slices"
+ "strings"
"sync"
"testing"
"time"
@@ -41,6 +42,198 @@ import (
"github.com/grafana/mimir/pkg/util/validation"
)
+func TestIngester_Start(t *testing.T) {
+ util_test.VerifyNoLeak(t)
+
+ t.Run("should replay the partition at startup (after a restart) and then join the ingesters and partitions ring", func(t *testing.T) {
+ var (
+ ctx = context.Background()
+ cfg = defaultIngesterTestConfig(t)
+ reg = prometheus.NewRegistry()
+ fetchRequestsCount = atomic.NewInt64(0)
+ series1 = mimirpb.PreallocTimeseries{TimeSeries: &mimirpb.TimeSeries{
+ Labels: mimirpb.FromLabelsToLabelAdapters(labels.FromStrings(labels.MetricName, "series_1")),
+ Samples: []mimirpb.Sample{{TimestampMs: 1000, Value: 10}},
+ }}
+ series2 = mimirpb.PreallocTimeseries{TimeSeries: &mimirpb.TimeSeries{
+ Labels: mimirpb.FromLabelsToLabelAdapters(labels.FromStrings(labels.MetricName, "series_2")),
+ Samples: []mimirpb.Sample{{TimestampMs: 1000, Value: 10}},
+ }}
+ )
+
+ const expectedSeriesToReplay = 2
+
+ // Configure the ingester to frequently run its internal services.
+ cfg.UpdateIngesterOwnedSeries = true
+ cfg.OwnedSeriesUpdateInterval = 100 * time.Millisecond
+ cfg.ActiveSeriesMetrics.UpdatePeriod = 100 * time.Millisecond
+ cfg.limitMetricsUpdatePeriod = 100 * time.Millisecond
+
+ // Configure the TSDB Head compaction interval to be greater than the high frequency
+ // expected while starting up.
+ const headCompactionIntervalWhileStarting = 100 * time.Millisecond
+ const headCompactionIntervalWhileRunning = time.Minute
+ cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval = headCompactionIntervalWhileRunning
+ cfg.BlocksStorageConfig.TSDB.HeadCompactionIntervalWhileStarting = headCompactionIntervalWhileStarting
+ cfg.BlocksStorageConfig.TSDB.HeadCompactionIntervalJitterEnabled = false
+
+ // Create the ingester.
+ overrides, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
+ require.NoError(t, err)
+ ingester, kafkaCluster, watcher := createTestIngesterWithIngestStorage(t, &cfg, overrides, reg)
+
+ // Mock the Kafka cluster to:
+ // - Count the Fetch requests.
+ // - Mock the ListOffsets response, returning the offset expected once the ingester can be
+	//   considered to have successfully caught up.
+ kafkaCluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) {
+ kafkaCluster.KeepControl()
+ fetchRequestsCount.Inc()
+
+ return nil, nil, false
+ })
+
+ kafkaCluster.ControlKey(int16(kmsg.ListOffsets), func(kreq kmsg.Request) (kmsg.Response, error, bool) {
+ kafkaCluster.KeepControl()
+
+ // Mock only requests for the partition "end" offset (identified by special timestamp -1).
+ req := kreq.(*kmsg.ListOffsetsRequest)
+ for _, topic := range req.Topics {
+ for _, partition := range topic.Partitions {
+ if partition.Timestamp != -1 {
+ return nil, nil, false
+ }
+ }
+ }
+
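+		// Return an "end" offset equal to the number of series the test writes in total, so the
+		// catch-up completes only after the 2nd series is produced.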
+ res := req.ResponseKind().(*kmsg.ListOffsetsResponse)
+ res.Topics = []kmsg.ListOffsetsResponseTopic{{
+ Topic: cfg.IngestStorageConfig.KafkaConfig.Topic,
+ Partitions: []kmsg.ListOffsetsResponseTopicPartition{{
+ Partition: ingester.ingestPartitionID,
+ ErrorCode: 0,
+ Offset: expectedSeriesToReplay,
+ }},
+ }}
+
+ return res, nil, true
+ })
+
+ // Create a Kafka writer and then write a series.
+ writer := ingest.NewWriter(cfg.IngestStorageConfig.KafkaConfig, log.NewNopLogger(), nil)
+ require.NoError(t, services.StartAndAwaitRunning(ctx, writer))
+ t.Cleanup(func() {
+ require.NoError(t, services.StopAndAwaitTerminated(ctx, writer))
+ })
+
+ partitionID, err := ingest.IngesterPartitionID(cfg.IngesterRing.InstanceID)
+ require.NoError(t, err)
+ require.NoError(t, writer.WriteSync(ctx, partitionID, userID, &mimirpb.WriteRequest{Timeseries: []mimirpb.PreallocTimeseries{series1}, Source: mimirpb.API}))
+
+ // Add the ingester in LEAVING state in the ring, in order to simulate an ingester restart.
+	// This makes the owned series tracker work correctly at ingester startup.
+ require.NoError(t, cfg.IngesterRing.KVStore.Mock.CAS(context.Background(), IngesterRingKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ desc := ring.GetOrCreateRingDesc(in)
+ desc.AddIngester(
+ cfg.IngesterRing.InstanceID,
+ cfg.IngesterRing.InstanceAddr,
+ cfg.IngesterRing.InstanceZone,
+ cfg.IngesterRing.customTokenGenerator().GenerateTokens(512, nil),
+ ring.LEAVING,
+ time.Now())
+
+ return desc, true, nil
+ }))
+
+ // Add the partition and owner in the ring, in order to simulate an ingester restart.
+ require.NoError(t, cfg.IngesterPartitionRing.kvMock.CAS(context.Background(), PartitionRingKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ partitionID, err := ingest.IngesterPartitionID(cfg.IngesterRing.InstanceID)
+ if err != nil {
+ return nil, false, err
+ }
+
+ desc := ring.GetOrCreatePartitionRingDesc(in)
+ desc.AddPartition(partitionID, ring.PartitionActive, time.Now())
+ desc.AddOrUpdateOwner(cfg.IngesterRing.InstanceID, ring.OwnerDeleted, partitionID, time.Now())
+
+ return desc, true, nil
+ }))
+
+ // Start the ingester.
+ require.NoError(t, ingester.StartAsync(ctx))
+ t.Cleanup(func() {
+ require.NoError(t, services.StopAndAwaitTerminated(ctx, ingester))
+ })
+
+ // Wait until the Kafka cluster received the 1st Fetch request.
+ test.Poll(t, 5*time.Second, true, func() interface{} {
+ return fetchRequestsCount.Load() > 0
+ })
+
+	// At this point we expect the ingester to be stuck in the starting state while catching up
+	// replaying the partition. The catch-up will not succeed until we write the next
+	// series to Kafka, because we've mocked Kafka to return a "last produced offset"
+	// in the future.
+
+ // We expect the following internal services to be already running at this point.
+ assert.Equal(t, services.Running, ingester.compactionService.State())
+ assert.Equal(t, services.Running, ingester.ownedSeriesService.State())
+ assert.Equal(t, services.Running, ingester.metricsUpdaterService.State())
+ assert.Equal(t, services.Running, ingester.metadataPurgerService.State())
+
+ // We expect the TSDB Head compaction to run while replaying from Kafka.
+ assert.Eventually(t, func() bool {
+ return testutil.ToFloat64(ingester.metrics.compactionsTriggered) > 0
+ }, time.Second, 10*time.Millisecond)
+
+ // We expect metrics to be updated.
+ test.Poll(t, time.Second, nil, func() interface{} {
+ return testutil.GatherAndCompare(reg, strings.NewReader(fmt.Sprintf(`
+ # HELP cortex_ingester_active_series Number of currently active series per user.
+ # TYPE cortex_ingester_active_series gauge
+ cortex_ingester_active_series{user="%s"} 1
+
+ # HELP cortex_ingester_owned_series Number of currently owned series per user.
+ # TYPE cortex_ingester_owned_series gauge
+ cortex_ingester_owned_series{user="%s"} 1
+ `, userID, userID)), "cortex_ingester_active_series", "cortex_ingester_owned_series")
+ })
+
+	// We expect the ingester to run TSDB Head compaction at a higher frequency while catching up.
+ firstInterval, standardInterval := ingester.compactionServiceInterval()
+ assert.Equal(t, headCompactionIntervalWhileStarting, firstInterval)
+ assert.Equal(t, headCompactionIntervalWhileStarting, standardInterval)
+
+ assert.Eventually(t, func() bool {
+ return testutil.ToFloat64(ingester.metrics.compactionsTriggered) > 0
+ }, 5*time.Second, 10*time.Millisecond)
+
+	// Since the ingester is still replaying the partition, we expect it to be in the starting state.
+ assert.Equal(t, services.Starting, ingester.State())
+ assert.Equal(t, services.New, ingester.lifecycler.State())
+
+ // Write one more request to Kafka. This will cause the ingester to consume up until the
+	// "last produced offset" returned by the mocked Kafka, and so consider the catch-up complete.
+ require.NoError(t, writer.WriteSync(ctx, partitionID, userID, &mimirpb.WriteRequest{Timeseries: []mimirpb.PreallocTimeseries{series2}, Source: mimirpb.API}))
+
+ // We expect the ingester to catch up, and then switch to Running state.
+ test.Poll(t, 5*time.Second, services.Running, func() interface{} {
+ return ingester.State()
+ })
+ assert.Equal(t, services.Running, ingester.lifecycler.State())
+
+ assert.Eventually(t, func() bool {
+ return ingester.lifecycler.GetState() == ring.ACTIVE
+ }, time.Second, 10*time.Millisecond)
+
+ assert.Eventually(t, func() bool {
+ return slices.Equal(
+ watcher.PartitionRing().PartitionOwnerIDs(partitionID),
+ []string{ingester.cfg.IngesterRing.InstanceID})
+ }, time.Second, 10*time.Millisecond)
+ })
+}
+
func TestIngester_QueryStream_IngestStorageReadConsistency(t *testing.T) {
const (
metricName = "series_1"
@@ -73,7 +266,7 @@ func TestIngester_QueryStream_IngestStorageReadConsistency(t *testing.T) {
// to "strong" then a query shouldn't succeed until the Fetch requests succeed.
failFetch := atomic.NewBool(true)
- kafkaCluster.ControlKey(int16(kmsg.Fetch), func(kreq kmsg.Request) (kmsg.Response, error, bool) {
+ kafkaCluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) {
kafkaCluster.KeepControl()
if failFetch.Load() {
@@ -154,20 +347,13 @@ func TestIngester_PrepareShutdownHandler_IngestStorageSupport(t *testing.T) {
// Start ingester.
cfg := defaultIngesterTestConfig(t)
- ingester, _, _ := createTestIngesterWithIngestStorage(t, &cfg, overrides, reg)
+ ingester, _, watcher := createTestIngesterWithIngestStorage(t, &cfg, overrides, reg)
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(ctx, ingester))
t.Cleanup(func() {
require.NoError(t, services.StopAndAwaitTerminated(ctx, ingester))
})
- // Start a watcher used to assert on the partitions ring.
- watcher := ring.NewPartitionRingWatcher(PartitionRingName, PartitionRingKey, cfg.IngesterPartitionRing.kvMock, log.NewNopLogger(), nil)
- require.NoError(t, services.StartAndAwaitRunning(ctx, watcher))
- t.Cleanup(func() {
- require.NoError(t, services.StopAndAwaitTerminated(ctx, watcher))
- })
-
// Wait until it's healthy
test.Poll(t, 1*time.Second, 1, func() interface{} {
return ingester.lifecycler.HealthyInstancesCount()
@@ -219,20 +405,13 @@ func TestIngester_PreparePartitionDownscaleHandler(t *testing.T) {
setup := func(t *testing.T, cfg Config) (*Ingester, *ring.PartitionRingWatcher) {
// Start ingester.
- ingester, _, _ := createTestIngesterWithIngestStorage(t, &cfg, overrides, prometheus.NewPedanticRegistry())
+ ingester, _, watcher := createTestIngesterWithIngestStorage(t, &cfg, overrides, prometheus.NewPedanticRegistry())
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(ctx, ingester))
t.Cleanup(func() {
require.NoError(t, services.StopAndAwaitTerminated(ctx, ingester))
})
- // Start a watcher used to assert on the partitions ring.
- watcher := ring.NewPartitionRingWatcher(PartitionRingName, PartitionRingKey, cfg.IngesterPartitionRing.kvMock, log.NewNopLogger(), nil)
- require.NoError(t, services.StartAndAwaitRunning(ctx, watcher))
- t.Cleanup(func() {
- require.NoError(t, services.StopAndAwaitTerminated(ctx, watcher))
- })
-
// Wait until it's healthy
test.Poll(t, 1*time.Second, 1, func() interface{} {
return ingester.lifecycler.HealthyInstancesCount()
@@ -348,7 +527,7 @@ func TestIngester_ShouldNotCreatePartitionIfThereIsShutdownMarker(t *testing.T)
require.NoError(t, err)
cfg := defaultIngesterTestConfig(t)
- ingester, _, _ := createTestIngesterWithIngestStorage(t, &cfg, overrides, prometheus.NewPedanticRegistry())
+ ingester, _, watcher := createTestIngesterWithIngestStorage(t, &cfg, overrides, prometheus.NewPedanticRegistry())
// Create the shutdown marker.
require.NoError(t, os.MkdirAll(cfg.BlocksStorageConfig.TSDB.Dir, os.ModePerm))
@@ -361,13 +540,6 @@ func TestIngester_ShouldNotCreatePartitionIfThereIsShutdownMarker(t *testing.T)
_ = services.StopAndAwaitTerminated(ctx, ingester)
})
- // Start a watcher used to assert on the partitions ring.
- watcher := ring.NewPartitionRingWatcher(PartitionRingName, PartitionRingKey, cfg.IngesterPartitionRing.kvMock, log.NewNopLogger(), nil)
- require.NoError(t, services.StartAndAwaitRunning(ctx, watcher))
- t.Cleanup(func() {
- require.NoError(t, services.StopAndAwaitTerminated(ctx, watcher))
- })
-
// No matter how long we wait, we expect the ingester service to hung in the starting state
// given it's not allowed to create the partition and the partition doesn't exist in the ring.
time.Sleep(10 * cfg.IngesterPartitionRing.lifecyclerPollingInterval)
@@ -377,9 +549,33 @@ func TestIngester_ShouldNotCreatePartitionIfThereIsShutdownMarker(t *testing.T)
assert.Empty(t, watcher.PartitionRing().PartitionOwnerIDs(ingester.ingestPartitionID))
}
-// Returned ingester and ring watcher are NOT started.
+func TestIngester_compactionServiceInterval(t *testing.T) {
+ cfg := defaultIngesterTestConfig(t)
+ overrides, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
+ require.NoError(t, err)
+ ingester, _, _ := createTestIngesterWithIngestStorage(t, &cfg, overrides, nil)
+
+ ingester.cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval = time.Minute
+ ingester.cfg.BlocksStorageConfig.TSDB.HeadCompactionIntervalJitterEnabled = false
+
+ firstInterval, standardInterval := ingester.compactionServiceInterval()
+ assert.Equal(t, time.Minute, firstInterval)
+ assert.Equal(t, time.Minute, standardInterval)
+
+ ingester.cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval = time.Minute
+ ingester.cfg.BlocksStorageConfig.TSDB.HeadCompactionIntervalJitterEnabled = true
+
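+	// With jitter enabled, only the first interval is expected to be shortened.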
+ firstInterval, standardInterval = ingester.compactionServiceInterval()
+ assert.Less(t, firstInterval, time.Minute)
+ assert.Equal(t, time.Minute, standardInterval)
+}
+
+// Returned ingester is NOT started.
func createTestIngesterWithIngestStorage(t testing.TB, ingesterCfg *Config, overrides *validation.Overrides, reg prometheus.Registerer) (*Ingester, *kfake.Cluster, *ring.PartitionRingWatcher) {
- defaultIngesterConfig := defaultIngesterTestConfig(t)
+ var (
+ ctx = context.Background()
+ defaultIngesterConfig = defaultIngesterTestConfig(t)
+ )
// Always disable gRPC Push API when testing ingest store.
ingesterCfg.PushGrpcMethodEnabled = false
@@ -418,7 +614,14 @@ func createTestIngesterWithIngestStorage(t testing.TB, ingesterCfg *Config, over
// Disable TSDB head compaction jitter to have predictable tests.
ingesterCfg.BlocksStorageConfig.TSDB.HeadCompactionIntervalJitterEnabled = false
+	// Create and start the partition ring watcher. Since it's a dependency passed to the
+	// ingester's New() function, we start the watcher beforehand.
prw := ring.NewPartitionRingWatcher(PartitionRingName, PartitionRingKey, kv, log.NewNopLogger(), nil)
+ require.NoError(t, services.StartAndAwaitRunning(ctx, prw))
+ t.Cleanup(func() {
+ require.NoError(t, services.StopAndAwaitTerminated(ctx, prw))
+ })
+
ingester, err := New(*ingesterCfg, overrides, nil, prw, nil, reg, util_test.NewTestingLogger(t))
require.NoError(t, err)
diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index 8733dd13c2c..5e8e5921e96 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -15,6 +15,7 @@ import (
"io"
"math"
"net"
+ "net/http"
"net/http/httptest"
"net/url"
"os"
@@ -67,6 +68,7 @@ import (
"github.com/grafana/mimir/pkg/storage/tsdb/block"
"github.com/grafana/mimir/pkg/usagestats"
"github.com/grafana/mimir/pkg/util"
+ "github.com/grafana/mimir/pkg/util/globalerror"
util_math "github.com/grafana/mimir/pkg/util/math"
util_test "github.com/grafana/mimir/pkg/util/test"
"github.com/grafana/mimir/pkg/util/validation"
@@ -98,6 +100,22 @@ func TestIngester_Push(t *testing.T) {
userID := "test"
now := time.Now()
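+	// Invalid native histograms used by the soft-failure test cases below to exercise native histogram validation.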
+ histogramWithBucketCountMismatch := util_test.GenerateTestHistogram(1)
+ histogramWithBucketCountMismatch.Count++
+
+ histogramWithCountNotBigEnough := util_test.GenerateTestHistogram(1)
+ histogramWithCountNotBigEnough.Sum = math.NaN()
+ histogramWithCountNotBigEnough.Count--
+
+ histogramWithNegativeBucketCount := util_test.GenerateTestHistogram(1)
+ histogramWithNegativeBucketCount.NegativeBuckets[1] = -100
+
+ histogramWithSpanNegativeOffset := util_test.GenerateTestHistogram(1)
+	histogramWithSpanNegativeOffset.PositiveSpans[1].Offset = -2 // The first span can start at a negative offset, hence we modify span 1.
+
+ histogramWithSpansBucketsMismatch := util_test.GenerateTestHistogram(1)
+ histogramWithSpansBucketsMismatch.PositiveSpans[1].Length++
+
tests := map[string]struct {
reqs []*mimirpb.WriteRequest
expectedErr error
@@ -1154,6 +1172,211 @@ func TestIngester_Push(t *testing.T) {
cortex_ingester_tsdb_head_max_timestamp_seconds 0.01
`,
},
+ "should soft fail if histogram has a bucket count vs overall count mismatch": {
+ nativeHistograms: true,
+ reqs: []*mimirpb.WriteRequest{
+ mimirpb.NewWriteRequest(nil, mimirpb.API).AddHistogramSeries(
+ [][]mimirpb.LabelAdapter{metricLabelAdapters},
+ []mimirpb.Histogram{mimirpb.FromHistogramToHistogramProto(10, histogramWithBucketCountMismatch)},
+ nil,
+ ),
+ },
+			// Expect the literal error string (instead of constructing the error) so the test catches it if Prometheus changes the error message.
+ expectedErr: newErrorWithStatus(wrapOrAnnotateWithUser(newNativeHistogramValidationError(globalerror.NativeHistogramCountMismatch, fmt.Errorf("21 observations found in buckets, but the Count field is 22: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)"), model.Time(10), []mimirpb.LabelAdapter{metricLabelAdapters[0]}), userID), codes.FailedPrecondition),
+ expectedMetrics: `
+ # HELP cortex_ingester_ingested_samples_total The total number of samples ingested per user.
+ # TYPE cortex_ingester_ingested_samples_total counter
+ cortex_ingester_ingested_samples_total{user="test"} 0
+ # HELP cortex_discarded_samples_total The total number of samples that were discarded.
+ # TYPE cortex_discarded_samples_total counter
+ cortex_discarded_samples_total{group="",reason="invalid-native-histogram",user="test"} 1
+ # HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion per user.
+ # TYPE cortex_ingester_ingested_samples_failures_total counter
+ cortex_ingester_ingested_samples_failures_total{user="test"} 1
+ # HELP cortex_ingester_memory_users The current number of users in memory.
+ # TYPE cortex_ingester_memory_users gauge
+ cortex_ingester_memory_users 1
+ # HELP cortex_ingester_memory_series The current number of series in memory.
+ # TYPE cortex_ingester_memory_series gauge
+ cortex_ingester_memory_series 0
+ # HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+ # TYPE cortex_ingester_memory_series_created_total counter
+ cortex_ingester_memory_series_created_total{user="test"} 0
+ # HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+ # TYPE cortex_ingester_memory_series_removed_total counter
+ cortex_ingester_memory_series_removed_total{user="test"} 0
+ # HELP cortex_ingester_tsdb_head_min_timestamp_seconds Minimum timestamp of the head block across all tenants.
+ # TYPE cortex_ingester_tsdb_head_min_timestamp_seconds gauge
+ cortex_ingester_tsdb_head_min_timestamp_seconds 0.01
+ # HELP cortex_ingester_tsdb_head_max_timestamp_seconds Maximum timestamp of the head block across all tenants.
+ # TYPE cortex_ingester_tsdb_head_max_timestamp_seconds gauge
+ cortex_ingester_tsdb_head_max_timestamp_seconds 0.01
+ `,
+ },
+ "should soft fail if histogram has a bucket count higher than overall count and sum NaN": {
+ nativeHistograms: true,
+ reqs: []*mimirpb.WriteRequest{
+ mimirpb.NewWriteRequest(nil, mimirpb.API).AddHistogramSeries(
+ [][]mimirpb.LabelAdapter{metricLabelAdapters},
+ []mimirpb.Histogram{mimirpb.FromHistogramToHistogramProto(10, histogramWithCountNotBigEnough)},
+ nil,
+ ),
+ },
+			// Expect the literal error string (instead of constructing the error) so the test catches it if Prometheus changes the error message.
+ expectedErr: newErrorWithStatus(wrapOrAnnotateWithUser(newNativeHistogramValidationError(globalerror.NativeHistogramCountNotBigEnough, fmt.Errorf("21 observations found in buckets, but the Count field is 20: histogram's observation count should be at least the number of observations found in the buckets"), model.Time(10), []mimirpb.LabelAdapter{metricLabelAdapters[0]}), userID), codes.FailedPrecondition),
+ expectedMetrics: `
+ # HELP cortex_ingester_ingested_samples_total The total number of samples ingested per user.
+ # TYPE cortex_ingester_ingested_samples_total counter
+ cortex_ingester_ingested_samples_total{user="test"} 0
+ # HELP cortex_discarded_samples_total The total number of samples that were discarded.
+ # TYPE cortex_discarded_samples_total counter
+ cortex_discarded_samples_total{group="",reason="invalid-native-histogram",user="test"} 1
+ # HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion per user.
+ # TYPE cortex_ingester_ingested_samples_failures_total counter
+ cortex_ingester_ingested_samples_failures_total{user="test"} 1
+ # HELP cortex_ingester_memory_users The current number of users in memory.
+ # TYPE cortex_ingester_memory_users gauge
+ cortex_ingester_memory_users 1
+ # HELP cortex_ingester_memory_series The current number of series in memory.
+ # TYPE cortex_ingester_memory_series gauge
+ cortex_ingester_memory_series 0
+ # HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+ # TYPE cortex_ingester_memory_series_created_total counter
+ cortex_ingester_memory_series_created_total{user="test"} 0
+ # HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+ # TYPE cortex_ingester_memory_series_removed_total counter
+ cortex_ingester_memory_series_removed_total{user="test"} 0
+ # HELP cortex_ingester_tsdb_head_min_timestamp_seconds Minimum timestamp of the head block across all tenants.
+ # TYPE cortex_ingester_tsdb_head_min_timestamp_seconds gauge
+ cortex_ingester_tsdb_head_min_timestamp_seconds 0.01
+ # HELP cortex_ingester_tsdb_head_max_timestamp_seconds Maximum timestamp of the head block across all tenants.
+ # TYPE cortex_ingester_tsdb_head_max_timestamp_seconds gauge
+ cortex_ingester_tsdb_head_max_timestamp_seconds 0.01
+ `,
+ },
+ "should soft fail if histogram has a negative span offset": {
+ nativeHistograms: true,
+ reqs: []*mimirpb.WriteRequest{
+ mimirpb.NewWriteRequest(nil, mimirpb.API).AddHistogramSeries(
+ [][]mimirpb.LabelAdapter{metricLabelAdapters},
+ []mimirpb.Histogram{mimirpb.FromHistogramToHistogramProto(10, histogramWithSpanNegativeOffset)},
+ nil,
+ ),
+ },
+			// Expect the literal error string (instead of constructing the error) so the test catches it if Prometheus changes the error message.
+ expectedErr: newErrorWithStatus(wrapOrAnnotateWithUser(newNativeHistogramValidationError(globalerror.NativeHistogramSpanNegativeOffset, fmt.Errorf("positive side: span number 2 with offset -2: histogram has a span whose offset is negative"), model.Time(10), []mimirpb.LabelAdapter{metricLabelAdapters[0]}), userID), codes.FailedPrecondition),
+ expectedMetrics: `
+ # HELP cortex_ingester_ingested_samples_total The total number of samples ingested per user.
+ # TYPE cortex_ingester_ingested_samples_total counter
+ cortex_ingester_ingested_samples_total{user="test"} 0
+ # HELP cortex_discarded_samples_total The total number of samples that were discarded.
+ # TYPE cortex_discarded_samples_total counter
+ cortex_discarded_samples_total{group="",reason="invalid-native-histogram",user="test"} 1
+ # HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion per user.
+ # TYPE cortex_ingester_ingested_samples_failures_total counter
+ cortex_ingester_ingested_samples_failures_total{user="test"} 1
+ # HELP cortex_ingester_memory_users The current number of users in memory.
+ # TYPE cortex_ingester_memory_users gauge
+ cortex_ingester_memory_users 1
+ # HELP cortex_ingester_memory_series The current number of series in memory.
+ # TYPE cortex_ingester_memory_series gauge
+ cortex_ingester_memory_series 0
+ # HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+ # TYPE cortex_ingester_memory_series_created_total counter
+ cortex_ingester_memory_series_created_total{user="test"} 0
+ # HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+ # TYPE cortex_ingester_memory_series_removed_total counter
+ cortex_ingester_memory_series_removed_total{user="test"} 0
+ # HELP cortex_ingester_tsdb_head_min_timestamp_seconds Minimum timestamp of the head block across all tenants.
+ # TYPE cortex_ingester_tsdb_head_min_timestamp_seconds gauge
+ cortex_ingester_tsdb_head_min_timestamp_seconds 0.01
+ # HELP cortex_ingester_tsdb_head_max_timestamp_seconds Maximum timestamp of the head block across all tenants.
+ # TYPE cortex_ingester_tsdb_head_max_timestamp_seconds gauge
+ cortex_ingester_tsdb_head_max_timestamp_seconds 0.01
+ `,
+ },
+ "should soft fail if histogram has different number of buckets then encoded in spans": {
+ nativeHistograms: true,
+ reqs: []*mimirpb.WriteRequest{
+ mimirpb.NewWriteRequest(nil, mimirpb.API).AddHistogramSeries(
+ [][]mimirpb.LabelAdapter{metricLabelAdapters},
+ []mimirpb.Histogram{mimirpb.FromHistogramToHistogramProto(10, histogramWithSpansBucketsMismatch)},
+ nil,
+ ),
+ },
+			// Expect the literal error string (instead of constructing the error) so the test catches it if Prometheus changes the error message.
+ expectedErr: newErrorWithStatus(wrapOrAnnotateWithUser(newNativeHistogramValidationError(globalerror.NativeHistogramSpansBucketsMismatch, fmt.Errorf("positive side: spans need 5 buckets, have 4 buckets: histogram spans specify different number of buckets than provided"), model.Time(10), []mimirpb.LabelAdapter{metricLabelAdapters[0]}), userID), codes.FailedPrecondition),
+ expectedMetrics: `
+ # HELP cortex_ingester_ingested_samples_total The total number of samples ingested per user.
+ # TYPE cortex_ingester_ingested_samples_total counter
+ cortex_ingester_ingested_samples_total{user="test"} 0
+ # HELP cortex_discarded_samples_total The total number of samples that were discarded.
+ # TYPE cortex_discarded_samples_total counter
+ cortex_discarded_samples_total{group="",reason="invalid-native-histogram",user="test"} 1
+ # HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion per user.
+ # TYPE cortex_ingester_ingested_samples_failures_total counter
+ cortex_ingester_ingested_samples_failures_total{user="test"} 1
+ # HELP cortex_ingester_memory_users The current number of users in memory.
+ # TYPE cortex_ingester_memory_users gauge
+ cortex_ingester_memory_users 1
+ # HELP cortex_ingester_memory_series The current number of series in memory.
+ # TYPE cortex_ingester_memory_series gauge
+ cortex_ingester_memory_series 0
+ # HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+ # TYPE cortex_ingester_memory_series_created_total counter
+ cortex_ingester_memory_series_created_total{user="test"} 0
+ # HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+ # TYPE cortex_ingester_memory_series_removed_total counter
+ cortex_ingester_memory_series_removed_total{user="test"} 0
+ # HELP cortex_ingester_tsdb_head_min_timestamp_seconds Minimum timestamp of the head block across all tenants.
+ # TYPE cortex_ingester_tsdb_head_min_timestamp_seconds gauge
+ cortex_ingester_tsdb_head_min_timestamp_seconds 0.01
+ # HELP cortex_ingester_tsdb_head_max_timestamp_seconds Maximum timestamp of the head block across all tenants.
+ # TYPE cortex_ingester_tsdb_head_max_timestamp_seconds gauge
+ cortex_ingester_tsdb_head_max_timestamp_seconds 0.01
+ `,
+ },
+ "should soft fail if histogram has a negative bucket count": {
+ nativeHistograms: true,
+ reqs: []*mimirpb.WriteRequest{
+ mimirpb.NewWriteRequest(nil, mimirpb.API).AddHistogramSeries(
+ [][]mimirpb.LabelAdapter{metricLabelAdapters},
+ []mimirpb.Histogram{mimirpb.FromHistogramToHistogramProto(10, histogramWithNegativeBucketCount)},
+ nil,
+ ),
+ },
+			// Expect the literal error string (instead of constructing the error) so the test catches it if Prometheus changes the error message.
+ expectedErr: newErrorWithStatus(wrapOrAnnotateWithUser(newNativeHistogramValidationError(globalerror.NativeHistogramNegativeBucketCount, fmt.Errorf("negative side: bucket number 2 has observation count of -98: histogram has a bucket whose observation count is negative"), model.Time(10), []mimirpb.LabelAdapter{metricLabelAdapters[0]}), userID), codes.FailedPrecondition),
+ expectedMetrics: `
+ # HELP cortex_ingester_ingested_samples_total The total number of samples ingested per user.
+ # TYPE cortex_ingester_ingested_samples_total counter
+ cortex_ingester_ingested_samples_total{user="test"} 0
+ # HELP cortex_discarded_samples_total The total number of samples that were discarded.
+ # TYPE cortex_discarded_samples_total counter
+ cortex_discarded_samples_total{group="",reason="invalid-native-histogram",user="test"} 1
+ # HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion per user.
+ # TYPE cortex_ingester_ingested_samples_failures_total counter
+ cortex_ingester_ingested_samples_failures_total{user="test"} 1
+ # HELP cortex_ingester_memory_users The current number of users in memory.
+ # TYPE cortex_ingester_memory_users gauge
+ cortex_ingester_memory_users 1
+ # HELP cortex_ingester_memory_series The current number of series in memory.
+ # TYPE cortex_ingester_memory_series gauge
+ cortex_ingester_memory_series 0
+ # HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+ # TYPE cortex_ingester_memory_series_created_total counter
+ cortex_ingester_memory_series_created_total{user="test"} 0
+ # HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+ # TYPE cortex_ingester_memory_series_removed_total counter
+ cortex_ingester_memory_series_removed_total{user="test"} 0
+ # HELP cortex_ingester_tsdb_head_min_timestamp_seconds Minimum timestamp of the head block across all tenants.
+ # TYPE cortex_ingester_tsdb_head_min_timestamp_seconds gauge
+ cortex_ingester_tsdb_head_min_timestamp_seconds 0.01
+ # HELP cortex_ingester_tsdb_head_max_timestamp_seconds Maximum timestamp of the head block across all tenants.
+ # TYPE cortex_ingester_tsdb_head_max_timestamp_seconds gauge
+ cortex_ingester_tsdb_head_max_timestamp_seconds 0.01
+ `,
+ },
"should soft fail on existing histogram series if all exemplars are out of order": {
maxExemplars: 2,
nativeHistograms: true,
@@ -2623,8 +2846,8 @@ func Benchmark_Ingester_PushOnError(b *testing.B) {
runBenchmark func(b *testing.B, ingester *Ingester, metrics [][]mimirpb.LabelAdapter, samples []mimirpb.Sample)
}{
"out of bound samples": {
- prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { return true },
- beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+ prepareConfig: func(*validation.Limits, *InstanceLimits) bool { return true },
+ beforeBenchmark: func(b *testing.B, ingester *Ingester, _ int) {
// Push a single time series to set the TSDB min time.
currTimeReq := mimirpb.ToWriteRequest(
[][]mimirpb.LabelAdapter{{{Name: labels.MetricName, Value: metricName}}},
@@ -2648,7 +2871,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) {
},
},
"out-of-order samples": {
- prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { return true },
+ prepareConfig: func(*validation.Limits, *InstanceLimits) bool { return true },
beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
// For each series, push a single sample with a timestamp greater than next pushes.
for i := 0; i < numSeriesPerRequest; i++ {
@@ -2675,11 +2898,11 @@ func Benchmark_Ingester_PushOnError(b *testing.B) {
},
},
"per-user series limit reached": {
- prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool {
+ prepareConfig: func(limits *validation.Limits, _ *InstanceLimits) bool {
limits.MaxGlobalSeriesPerUser = 1
return true
},
- beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+ beforeBenchmark: func(b *testing.B, ingester *Ingester, _ int) {
// Push a series with a metric name different than the one used during the benchmark.
currTimeReq := mimirpb.ToWriteRequest(
[][]mimirpb.LabelAdapter{{{Name: labels.MetricName, Value: "another"}}},
@@ -2700,11 +2923,11 @@ func Benchmark_Ingester_PushOnError(b *testing.B) {
},
},
"per-metric series limit reached": {
- prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool {
+ prepareConfig: func(limits *validation.Limits, _ *InstanceLimits) bool {
limits.MaxGlobalSeriesPerMetric = 1
return true
},
- beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+ beforeBenchmark: func(b *testing.B, ingester *Ingester, _ int) {
// Push a series with the same metric name but different labels than the one used during the benchmark.
currTimeReq := mimirpb.ToWriteRequest(
[][]mimirpb.LabelAdapter{{{Name: labels.MetricName, Value: metricName}, {Name: "cardinality", Value: "another"}}},
@@ -2725,14 +2948,14 @@ func Benchmark_Ingester_PushOnError(b *testing.B) {
},
},
"very low ingestion rate limit": {
- prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool {
+ prepareConfig: func(_ *validation.Limits, instanceLimits *InstanceLimits) bool {
if instanceLimits == nil {
return false
}
instanceLimits.MaxIngestionRate = 0.00001 // very low
return true
},
- beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+ beforeBenchmark: func(b *testing.B, ingester *Ingester, _ int) {
// Send a lot of samples
_, err := ingester.Push(ctx, generateSamplesForLabel(labels.FromStrings(labels.MetricName, "test"), 1, 10000))
require.NoError(b, err)
@@ -2748,14 +2971,14 @@ func Benchmark_Ingester_PushOnError(b *testing.B) {
},
},
"max number of tenants reached": {
- prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool {
+ prepareConfig: func(_ *validation.Limits, instanceLimits *InstanceLimits) bool {
if instanceLimits == nil {
return false
}
instanceLimits.MaxInMemoryTenants = 1
return true
},
- beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+ beforeBenchmark: func(b *testing.B, ingester *Ingester, _ int) {
// Send some samples for one tenant (not the same that is used during the test)
ctx := user.InjectOrgID(context.Background(), "different_tenant")
_, err := ingester.Push(ctx, generateSamplesForLabel(labels.FromStrings(labels.MetricName, "test"), 1, 10000))
@@ -2770,14 +2993,14 @@ func Benchmark_Ingester_PushOnError(b *testing.B) {
},
},
"max number of series reached": {
- prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool {
+ prepareConfig: func(_ *validation.Limits, instanceLimits *InstanceLimits) bool {
if instanceLimits == nil {
return false
}
instanceLimits.MaxInMemorySeries = 1
return true
},
- beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+ beforeBenchmark: func(b *testing.B, ingester *Ingester, _ int) {
_, err := ingester.Push(ctx, generateSamplesForLabel(labels.FromStrings(labels.MetricName, "test"), 1, 10000))
require.NoError(b, err)
},
@@ -2789,14 +3012,14 @@ func Benchmark_Ingester_PushOnError(b *testing.B) {
},
},
"max inflight requests reached": {
- prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool {
+ prepareConfig: func(_ *validation.Limits, instanceLimits *InstanceLimits) bool {
if instanceLimits == nil {
return false
}
instanceLimits.MaxInflightPushRequests = 1
return true
},
- beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+ beforeBenchmark: func(_ *testing.B, ingester *Ingester, _ int) {
ingester.inflightPushRequests.Inc()
},
runBenchmark: func(b *testing.B, ingester *Ingester, metrics [][]mimirpb.LabelAdapter, samples []mimirpb.Sample) {
@@ -5065,7 +5288,7 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) {
},
"should not load any TSDB if the root directory is empty": {
walReplayConcurrency: 10,
- setup: func(t *testing.T, dir string) {},
+ setup: func(*testing.T, string) {},
check: func(t *testing.T, i *Ingester) {
require.Zero(t, len(i.tsdbs))
},
@@ -5399,7 +5622,7 @@ func TestIngester_closeAndDeleteUserTSDBIfIdle_shouldNotCloseTSDBIfShippingIsInP
// Mock the shipper to slow down Sync() execution.
s := mockUserShipper(t, i)
- s.On("Sync", mock.Anything).Run(func(args mock.Arguments) {
+ s.On("Sync", mock.Anything).Run(func(mock.Arguments) {
time.Sleep(3 * time.Second)
}).Return(0, nil)
@@ -5858,6 +6081,26 @@ func TestIngester_flushing(t *testing.T) {
require.Equal(t, 50*time.Hour.Milliseconds()+1, blocks[2].Meta().MaxTime) // Block maxt is exclusive.
},
},
+
+ "should not allow to flush blocks with flush API endpoint if ingester is not in Running state": {
+ setupIngester: func(cfg *Config) {
+ cfg.BlocksStorageConfig.TSDB.FlushBlocksOnShutdown = false
+ },
+
+ action: func(t *testing.T, i *Ingester, _ *prometheus.Registry) {
+ // Stop the ingester.
+ require.NoError(t, services.StopAndAwaitTerminated(context.Background(), i))
+
+ rec := httptest.NewRecorder()
+ i.FlushHandler(rec, httptest.NewRequest("POST", "/ingester/flush?wait=true", nil))
+
+ assert.Equal(t, http.StatusServiceUnavailable, rec.Result().StatusCode)
+
+ body, err := io.ReadAll(rec.Result().Body)
+ require.NoError(t, err)
+ assert.Equal(t, newUnavailableError(services.Terminated).Error(), string(body))
+ },
+ },
} {
t.Run(name, func(t *testing.T) {
cfg := defaultIngesterTestConfig(t)
@@ -7326,7 +7569,9 @@ func testIngesterInflightPushRequests(t *testing.T, i *Ingester, reg prometheus.
var optional middleware.OptionalLogging
require.ErrorAs(t, err, &optional)
- require.False(t, optional.ShouldLog(ctx, time.Duration(0)), "expected not to log via .ShouldLog()")
+
+ shouldLog, _ := optional.ShouldLog(ctx)
+ require.False(t, shouldLog, "expected not to log via .ShouldLog()")
s, ok := grpcutil.ErrorToStatus(err)
require.True(t, ok, "expected to be able to convert to gRPC status")
@@ -7449,7 +7694,9 @@ func TestIngester_inflightPushRequestsBytes(t *testing.T) {
var optional middleware.OptionalLogging
require.ErrorAs(t, err, &optional)
- require.False(t, optional.ShouldLog(ctx, time.Duration(0)), "expected not to log via .ShouldLog()")
+
+ shouldLog, _ := optional.ShouldLog(ctx)
+ require.False(t, shouldLog, "expected not to log via .ShouldLog()")
s, ok := grpcutil.ErrorToStatus(err)
require.True(t, ok, "expected to be able to convert to gRPC status")
@@ -8341,7 +8588,7 @@ func TestIngesterActiveSeries(t *testing.T) {
},
},
"active series for cardinality API": {
- test: func(t *testing.T, ingester *Ingester, gatherer prometheus.Gatherer) {
+ test: func(t *testing.T, ingester *Ingester, _ prometheus.Gatherer) {
pushWithUser(t, ingester, labelsToPush, userID, req)
pushWithUser(t, ingester, labelsToPush, userID2, req)
pushWithUser(t, ingester, labelsToPushHist, userID, reqHist)
@@ -10269,7 +10516,9 @@ func TestIngester_lastUpdatedTimeIsNotInTheFuture(t *testing.T) {
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(ctx, i))
- defer services.StopAndAwaitTerminated(ctx, i) //nolint:errcheck
+ t.Cleanup(func() {
+ require.NoError(t, services.StopAndAwaitTerminated(ctx, i))
+ })
// Wait until it's healthy
test.Poll(t, 1*time.Second, 1, func() interface{} {
@@ -10400,7 +10649,7 @@ func newFailingIngester(t *testing.T, cfg Config, kvStore kv.Client, failingCaus
if kvStore != nil {
fI.kvStore = kvStore
}
- fI.BasicService = services.NewBasicService(fI.starting, fI.updateLoop, fI.stopping)
+ fI.BasicService = services.NewBasicService(fI.starting, fI.ingesterRunning, fI.stopping)
return fI
}
diff --git a/pkg/ingester/label_names_and_values_test.go b/pkg/ingester/label_names_and_values_test.go
index 2f0f710d81c..8d9f9badbf1 100644
--- a/pkg/ingester/label_names_and_values_test.go
+++ b/pkg/ingester/label_names_and_values_test.go
@@ -56,7 +56,7 @@ func TestLabelNamesAndValuesAreSentInBatches(t *testing.T) {
}
mockServer := mockLabelNamesAndValuesServer{context: context.Background()}
var stream client.Ingester_LabelNamesAndValuesServer = &mockServer
- var valueFilter = func(name, value string) (bool, error) {
+ var valueFilter = func(string, string) (bool, error) {
return true, nil
}
require.NoError(t, labelNamesAndValues(&mockIndex{existingLabels: existingLabels}, []*labels.Matcher{}, 32, stream, valueFilter))
@@ -95,7 +95,7 @@ func TestLabelNamesAndValues_FilteredValues(t *testing.T) {
}
mockServer := mockLabelNamesAndValuesServer{context: context.Background()}
var stream client.Ingester_LabelNamesAndValuesServer = &mockServer
- var valueFilter = func(name, value string) (bool, error) {
+ var valueFilter = func(_, value string) (bool, error) {
return strings.Contains(value, "0"), nil
}
require.NoError(t, labelNamesAndValues(&mockIndex{existingLabels: existingLabels}, []*labels.Matcher{}, 32, stream, valueFilter))
@@ -286,7 +286,7 @@ func TestLabelNamesAndValues_ContextCancellation(t *testing.T) {
opDelay: idxOpDelay,
}
- var valueFilter = func(name, value string) (bool, error) {
+ var valueFilter = func(string, string) (bool, error) {
return true, nil
}
doneCh := make(chan error, 1)
diff --git a/pkg/ingester/metrics.go b/pkg/ingester/metrics.go
index 2c422c24fab..833a802fb04 100644
--- a/pkg/ingester/metrics.go
+++ b/pkg/ingester/metrics.go
@@ -420,24 +420,26 @@ func (m *ingesterMetrics) deletePerUserCustomTrackerMetrics(userID string, custo
}
type discardedMetrics struct {
- sampleOutOfBounds *prometheus.CounterVec
- sampleOutOfOrder *prometheus.CounterVec
- sampleTooOld *prometheus.CounterVec
- sampleTooFarInFuture *prometheus.CounterVec
- newValueForTimestamp *prometheus.CounterVec
- perUserSeriesLimit *prometheus.CounterVec
- perMetricSeriesLimit *prometheus.CounterVec
+ sampleOutOfBounds *prometheus.CounterVec
+ sampleOutOfOrder *prometheus.CounterVec
+ sampleTooOld *prometheus.CounterVec
+ sampleTooFarInFuture *prometheus.CounterVec
+ newValueForTimestamp *prometheus.CounterVec
+ perUserSeriesLimit *prometheus.CounterVec
+ perMetricSeriesLimit *prometheus.CounterVec
+ invalidNativeHistogram *prometheus.CounterVec
}
func newDiscardedMetrics(r prometheus.Registerer) *discardedMetrics {
return &discardedMetrics{
- sampleOutOfBounds: validation.DiscardedSamplesCounter(r, reasonSampleOutOfBounds),
- sampleOutOfOrder: validation.DiscardedSamplesCounter(r, reasonSampleOutOfOrder),
- sampleTooOld: validation.DiscardedSamplesCounter(r, reasonSampleTooOld),
- sampleTooFarInFuture: validation.DiscardedSamplesCounter(r, reasonSampleTooFarInFuture),
- newValueForTimestamp: validation.DiscardedSamplesCounter(r, reasonNewValueForTimestamp),
- perUserSeriesLimit: validation.DiscardedSamplesCounter(r, reasonPerUserSeriesLimit),
- perMetricSeriesLimit: validation.DiscardedSamplesCounter(r, reasonPerMetricSeriesLimit),
+ sampleOutOfBounds: validation.DiscardedSamplesCounter(r, reasonSampleOutOfBounds),
+ sampleOutOfOrder: validation.DiscardedSamplesCounter(r, reasonSampleOutOfOrder),
+ sampleTooOld: validation.DiscardedSamplesCounter(r, reasonSampleTooOld),
+ sampleTooFarInFuture: validation.DiscardedSamplesCounter(r, reasonSampleTooFarInFuture),
+ newValueForTimestamp: validation.DiscardedSamplesCounter(r, reasonNewValueForTimestamp),
+ perUserSeriesLimit: validation.DiscardedSamplesCounter(r, reasonPerUserSeriesLimit),
+ perMetricSeriesLimit: validation.DiscardedSamplesCounter(r, reasonPerMetricSeriesLimit),
+ invalidNativeHistogram: validation.DiscardedSamplesCounter(r, reasonInvalidNativeHistogram),
}
}
@@ -449,6 +451,7 @@ func (m *discardedMetrics) DeletePartialMatch(filter prometheus.Labels) {
m.newValueForTimestamp.DeletePartialMatch(filter)
m.perUserSeriesLimit.DeletePartialMatch(filter)
m.perMetricSeriesLimit.DeletePartialMatch(filter)
+ m.invalidNativeHistogram.DeletePartialMatch(filter)
}
func (m *discardedMetrics) DeleteLabelValues(userID string, group string) {
@@ -459,6 +462,7 @@ func (m *discardedMetrics) DeleteLabelValues(userID string, group string) {
m.newValueForTimestamp.DeleteLabelValues(userID, group)
m.perUserSeriesLimit.DeleteLabelValues(userID, group)
m.perMetricSeriesLimit.DeleteLabelValues(userID, group)
+ m.invalidNativeHistogram.DeleteLabelValues(userID, group)
}
// TSDB metrics collector. Each tenant has its own registry, that TSDB code uses.
diff --git a/pkg/ingester/owned_series_test.go b/pkg/ingester/owned_series_test.go
index ccb82cc3561..22d2a4bba9a 100644
--- a/pkg/ingester/owned_series_test.go
+++ b/pkg/ingester/owned_series_test.go
@@ -153,7 +153,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) {
testFunc func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits)
}{
"empty ingester": {
- testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) {
require.Equal(t, 0, c.ownedSeries.updateAllTenants(context.Background(), false))
},
},
@@ -164,7 +164,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) {
IngestionTenantShardSize: 0,
},
},
- testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) {
c.pushUserSeries(t)
// first ingester owns all the series, even without any ownedSeries run. this is because each created series is automatically counted as "owned".
@@ -192,7 +192,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) {
IngestionTenantShardSize: 0,
},
},
- testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) {
c.pushUserSeries(t)
c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser)
c.checkUpdateReasonForUser(t, "")
@@ -223,7 +223,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) {
IngestionTenantShardSize: 1,
},
},
- testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) {
c.pushUserSeries(t)
c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser)
c.checkUpdateReasonForUser(t, "")
@@ -272,7 +272,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) {
IngestionTenantShardSize: 1,
},
},
- testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) {
c.pushUserSeries(t)
c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser)
c.checkUpdateReasonForUser(t, "")
@@ -320,7 +320,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) {
IngestionTenantShardSize: 0,
},
},
- testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) {
c.pushUserSeries(t)
c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser)
c.checkUpdateReasonForUser(t, "")
@@ -550,7 +550,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) {
IngestionTenantShardSize: 0,
},
},
- testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) {
c.pushUserSeries(t)
c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser)
c.checkUpdateReasonForUser(t, "")
@@ -578,7 +578,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) {
IngestionTenantShardSize: 0,
},
},
- testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) {
c.pushUserSeries(t)
c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser)
c.checkUpdateReasonForUser(t, "")
@@ -722,12 +722,7 @@ func (c *ownedSeriesWithPartitionsRingTestContext) createIngesterAndPartitionRin
c.ing = ing
c.partitionsRing = prw
- // Ingester and partitions ring watcher are not started yet.
- require.NoError(t, services.StartAndAwaitRunning(context.Background(), c.partitionsRing))
- t.Cleanup(func() {
- require.NoError(t, services.StopAndAwaitTerminated(context.Background(), c.partitionsRing))
- })
-
+ // Ingester is not started yet.
require.NoError(t, services.StartAndAwaitRunning(context.Background(), c.ing))
t.Cleanup(func() {
require.NoError(t, services.StopAndAwaitTerminated(context.Background(), c.ing))
@@ -811,7 +806,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) {
testFunc func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits)
}{
"empty ingester": {
- testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) {
require.Equal(t, 0, c.ownedSeries.updateAllTenants(context.Background(), false))
},
},
@@ -822,7 +817,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) {
IngestionPartitionsTenantShardSize: 0,
},
},
- testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) {
c.pushUserSeries(t)
// first ingester owns all the series, even without any ownedSeries run. this is because each created series is automatically counted as "owned".
@@ -850,7 +845,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) {
IngestionPartitionsTenantShardSize: 0,
},
},
- testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) {
c.pushUserSeries(t)
c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser)
c.checkUpdateReasonForUser(t, "")
@@ -881,7 +876,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) {
IngestionPartitionsTenantShardSize: 1,
},
},
- testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) {
c.pushUserSeries(t)
c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser)
c.checkUpdateReasonForUser(t, "")
@@ -931,7 +926,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) {
IngestionPartitionsTenantShardSize: 1,
},
},
- testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) {
c.pushUserSeries(t)
c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser)
c.checkUpdateReasonForUser(t, "")
@@ -979,7 +974,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) {
IngestionPartitionsTenantShardSize: 0,
},
},
- testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) {
c.pushUserSeries(t)
c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser)
c.checkUpdateReasonForUser(t, "")
@@ -1214,7 +1209,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) {
IngestionPartitionsTenantShardSize: 0,
},
},
- testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) {
c.pushUserSeries(t)
c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser)
c.checkUpdateReasonForUser(t, "")
@@ -1242,7 +1237,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) {
IngestionPartitionsTenantShardSize: 0,
},
},
- testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) {
+ testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) {
c.pushUserSeries(t)
c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser)
c.checkUpdateReasonForUser(t, "")
@@ -1467,7 +1462,7 @@ func TestOwnedSeriesPartitionsRingStrategyRingChanged(t *testing.T) {
partitionRing.AddPartition(1, ring.PartitionActive, time.Now())
})
- t.Run("first call with active partition in the ring reports change", func(t *testing.T) {
+ t.Run("first call with active partition in the ring reports change", func(*testing.T) {
// State of the ring: 1: Active
checkExpectedRingChange(true)
// second call reports no change
diff --git a/pkg/mimir/mimir.go b/pkg/mimir/mimir.go
index bb7bd6965b4..6b42d748396 100644
--- a/pkg/mimir/mimir.go
+++ b/pkg/mimir/mimir.go
@@ -48,6 +48,7 @@ import (
alertstorelocal "github.com/grafana/mimir/pkg/alertmanager/alertstore/local"
"github.com/grafana/mimir/pkg/api"
"github.com/grafana/mimir/pkg/compactor"
+ "github.com/grafana/mimir/pkg/continuoustest"
"github.com/grafana/mimir/pkg/distributor"
"github.com/grafana/mimir/pkg/flusher"
"github.com/grafana/mimir/pkg/frontend"
@@ -135,6 +136,7 @@ type Config struct {
MemberlistKV memberlist.KVConfig `yaml:"memberlist"`
QueryScheduler scheduler.Config `yaml:"query_scheduler"`
UsageStats usagestats.Config `yaml:"usage_stats"`
+ ContinuousTest continuoustest.Config `yaml:"-"`
OverridesExporter exporter.Config `yaml:"overrides_exporter"`
Common CommonConfig `yaml:"common"`
@@ -192,6 +194,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet, logger log.Logger) {
c.ActivityTracker.RegisterFlags(f)
c.QueryScheduler.RegisterFlags(f, logger)
c.UsageStats.RegisterFlags(f)
+ c.ContinuousTest.RegisterFlags(f)
c.OverridesExporter.RegisterFlags(f, logger)
c.Common.RegisterFlags(f)
@@ -720,6 +723,7 @@ type Mimir struct {
ActivityTracker *activitytracker.ActivityTracker
Vault *vault.Vault
UsageStatsReporter *usagestats.Reporter
+ ContinuousTestManager *continuoustest.Manager
BuildInfoHandler http.Handler
}
@@ -755,6 +759,10 @@ func New(cfg Config, reg prometheus.Registerer) (*Mimir, error) {
"/schedulerpb.SchedulerForQuerier/NotifyQuerierShutdown",
})
+	// Do not allow configuring potentially unsafe options until we've properly tested them in Mimir.
+ // These configuration options are hidden in the auto-generated documentation (see pkg/util/configdoc).
+ cfg.Server.GRPCServerRecvBufferPoolsEnabled = false
+
// Inject the registerer in the Server config too.
cfg.Server.Registerer = reg
diff --git a/pkg/mimir/mimir_test.go b/pkg/mimir/mimir_test.go
index 7ff85d2c99a..dde93f51e01 100644
--- a/pkg/mimir/mimir_test.go
+++ b/pkg/mimir/mimir_test.go
@@ -521,7 +521,7 @@ func TestConfig_validateFilesystemPaths(t *testing.T) {
expectedErr string
}{
"should succeed with the default configuration": {
- setup: func(cfg *Config) {},
+ setup: func(*Config) {},
},
"should fail if alertmanager data directory contains bucket store sync directory when running mimir-backend": {
setup: func(cfg *Config) {
diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go
index 60d7c3f1451..fb55498fd91 100644
--- a/pkg/mimir/modules.go
+++ b/pkg/mimir/modules.go
@@ -40,6 +40,7 @@ import (
"github.com/grafana/mimir/pkg/alertmanager/alertstore"
"github.com/grafana/mimir/pkg/api"
"github.com/grafana/mimir/pkg/compactor"
+ "github.com/grafana/mimir/pkg/continuoustest"
"github.com/grafana/mimir/pkg/distributor"
"github.com/grafana/mimir/pkg/flusher"
"github.com/grafana/mimir/pkg/frontend"
@@ -85,6 +86,7 @@ const (
Queryable string = "queryable"
StoreQueryable string = "store-queryable"
QueryFrontend string = "query-frontend"
+ QueryFrontendCodec string = "query-frontend-codec"
QueryFrontendTripperware string = "query-frontend-tripperware"
RulerStorage string = "ruler-storage"
Ruler string = "ruler"
@@ -96,6 +98,7 @@ const (
Vault string = "vault"
TenantFederation string = "tenant-federation"
UsageStats string = "usage-stats"
+ ContinuousTest string = "continuous-test"
All string = "all"
// Write Read and Backend are the targets used when using the read-write deployment mode.
@@ -618,7 +621,7 @@ func (t *Mimir) initQuerier() (serv services.Service, err error) {
}
// Add a middleware to extract the trace context and add a header.
- internalQuerierRouter = nethttp.MiddlewareFunc(opentracing.GlobalTracer(), internalQuerierRouter.ServeHTTP, nethttp.OperationNameFunc(func(r *http.Request) string {
+ internalQuerierRouter = nethttp.MiddlewareFunc(opentracing.GlobalTracer(), internalQuerierRouter.ServeHTTP, nethttp.OperationNameFunc(func(*http.Request) string {
return "internalQuerier"
}))
@@ -703,10 +706,16 @@ func (t *Mimir) initFlusher() (serv services.Service, err error) {
return t.Flusher, nil
}
+// initQueryFrontendCodec initializes query frontend codec.
+// NOTE: Grafana Enterprise Metrics depends on this.
+func (t *Mimir) initQueryFrontendCodec() (services.Service, error) {
+ t.QueryFrontendCodec = querymiddleware.NewPrometheusCodec(t.Registerer, t.Cfg.Frontend.QueryMiddleware.QueryResultResponseFormat)
+ return nil, nil
+}
+
// initQueryFrontendTripperware instantiates the tripperware used by the query frontend
// to optimize Prometheus query requests.
func (t *Mimir) initQueryFrontendTripperware() (serv services.Service, err error) {
- t.QueryFrontendCodec = querymiddleware.NewPrometheusCodec(t.Registerer, t.Cfg.Frontend.QueryMiddleware.QueryResultResponseFormat)
promqlEngineRegisterer := prometheus.WrapRegistererWith(prometheus.Labels{"engine": "query-frontend"}, t.Registerer)
engineOpts, engineExperimentalFunctionsEnabled := engine.NewPromQLEngineOptions(t.Cfg.Querier.EngineConfig, t.ActivityTracker, util_log.Logger, promqlEngineRegisterer)
@@ -1027,6 +1036,20 @@ func (t *Mimir) initUsageStats() (services.Service, error) {
return t.UsageStatsReporter, nil
}
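+// initContinuousTest wires up the continuous-test client and manager, and returns a basic service that runs the manager.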
+func (t *Mimir) initContinuousTest() (services.Service, error) {
+ client, err := continuoustest.NewClient(t.Cfg.ContinuousTest.Client, util_log.Logger)
+ if err != nil {
+ return nil, errors.Wrap(err, "Failed to initialize continuous-test client")
+ }
+
+ t.ContinuousTestManager = continuoustest.NewManager(t.Cfg.ContinuousTest.Manager, util_log.Logger)
+ t.ContinuousTestManager.AddTest(continuoustest.NewWriteReadSeriesTest(t.Cfg.ContinuousTest.WriteReadSeriesTest, client, util_log.Logger, t.Registerer))
+
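+	// Run the manager for the whole lifetime of the service (no separate starting or stopping function).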
+ return services.NewBasicService(nil, func(ctx context.Context) error {
+ return t.ContinuousTestManager.Run(ctx)
+ }, nil), nil
+}
+
func (t *Mimir) setupModuleManager() error {
mm := modules.NewManager(util_log.Logger)
@@ -1051,6 +1074,7 @@ func (t *Mimir) setupModuleManager() error {
mm.RegisterModule(Queryable, t.initQueryable, modules.UserInvisibleModule)
mm.RegisterModule(Querier, t.initQuerier)
mm.RegisterModule(StoreQueryable, t.initStoreQueryable, modules.UserInvisibleModule)
+ mm.RegisterModule(QueryFrontendCodec, t.initQueryFrontendCodec, modules.UserInvisibleModule)
mm.RegisterModule(QueryFrontendTripperware, t.initQueryFrontendTripperware, modules.UserInvisibleModule)
mm.RegisterModule(QueryFrontend, t.initQueryFrontend)
mm.RegisterModule(RulerStorage, t.initRulerStorage, modules.UserInvisibleModule)
@@ -1061,6 +1085,7 @@ func (t *Mimir) setupModuleManager() error {
mm.RegisterModule(QueryScheduler, t.initQueryScheduler)
mm.RegisterModule(TenantFederation, t.initTenantFederation, modules.UserInvisibleModule)
mm.RegisterModule(UsageStats, t.initUsageStats, modules.UserInvisibleModule)
+ mm.RegisterModule(ContinuousTest, t.initContinuousTest)
mm.RegisterModule(Vault, t.initVault, modules.UserInvisibleModule)
mm.RegisterModule(Write, nil)
mm.RegisterModule(Read, nil)
@@ -1085,7 +1110,7 @@ func (t *Mimir) setupModuleManager() error {
Queryable: {Overrides, DistributorService, IngesterRing, IngesterPartitionRing, API, StoreQueryable, MemberlistKV},
Querier: {TenantFederation, Vault},
StoreQueryable: {Overrides, MemberlistKV},
- QueryFrontendTripperware: {API, Overrides},
+ QueryFrontendTripperware: {API, Overrides, QueryFrontendCodec},
QueryFrontend: {QueryFrontendTripperware, MemberlistKV, Vault},
QueryScheduler: {API, Overrides, MemberlistKV, Vault},
Ruler: {DistributorService, StoreQueryable, RulerStorage, Vault},
@@ -1094,6 +1119,7 @@ func (t *Mimir) setupModuleManager() error {
Compactor: {API, MemberlistKV, Overrides, Vault},
StoreGateway: {API, Overrides, MemberlistKV, Vault},
TenantFederation: {Queryable},
+ ContinuousTest: {API},
Write: {Distributor, Ingester},
Read: {QueryFrontend, Querier},
Backend: {QueryScheduler, Ruler, StoreGateway, Compactor, AlertManager, OverridesExporter},
diff --git a/pkg/mimir/runtime_config_test.go b/pkg/mimir/runtime_config_test.go
index 81145988c33..5224becb5d6 100644
--- a/pkg/mimir/runtime_config_test.go
+++ b/pkg/mimir/runtime_config_test.go
@@ -132,7 +132,7 @@ func TestRuntimeConfigLoader_RunsValidation(t *testing.T) {
}{
{
name: "successful validate doesn't return error",
- validate: func(limits validation.Limits) error {
+ validate: func(validation.Limits) error {
return nil
},
},
@@ -141,7 +141,7 @@ func TestRuntimeConfigLoader_RunsValidation(t *testing.T) {
},
{
name: "unsuccessful validate returns error",
- validate: func(limits validation.Limits) error {
+ validate: func(validation.Limits) error {
return errors.New("validation failed")
},
hasError: true,
diff --git a/pkg/mimir/sanity_check_test.go b/pkg/mimir/sanity_check_test.go
index 23554ccea2f..74a7da2d4ac 100644
--- a/pkg/mimir/sanity_check_test.go
+++ b/pkg/mimir/sanity_check_test.go
@@ -216,19 +216,19 @@ func TestCheckDirectoryReadWriteAccess(t *testing.T) {
expected string
}{
"should fail on directory without write access": {
- dirExistsFn: func(dir string) (bool, error) {
+ dirExistsFn: func(string) (bool, error) {
return true, nil
},
- isDirReadWritable: func(dir string) error {
+ isDirReadWritable: func(string) error {
return errors.New("read only")
},
expected: fmt.Sprintf("failed to access directory %s: read only", configuredPath),
},
"should pass on directory with read-write access": {
- dirExistsFn: func(dir string) (bool, error) {
+ dirExistsFn: func(string) (bool, error) {
return true, nil
},
- isDirReadWritable: func(dir string) error {
+ isDirReadWritable: func(string) error {
return nil
},
expected: "",
diff --git a/pkg/mimirtool/analyze/grafana.go b/pkg/mimirtool/analyze/grafana.go
index ed7816a8126..0e8176df70c 100644
--- a/pkg/mimirtool/analyze/grafana.go
+++ b/pkg/mimirtool/analyze/grafana.go
@@ -217,7 +217,7 @@ func parseQuery(query string, metrics map[string]struct{}) error {
return err
}
- parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
+ parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
if n, ok := node.(*parser.VectorSelector); ok {
// VectorSelector has .Name when it's explicitly set as `name{...}`.
// Otherwise we need to look into the matchers.
diff --git a/pkg/mimirtool/analyze/ruler.go b/pkg/mimirtool/analyze/ruler.go
index 58f7a4da6b2..a33c83fe01c 100644
--- a/pkg/mimirtool/analyze/ruler.go
+++ b/pkg/mimirtool/analyze/ruler.go
@@ -48,7 +48,7 @@ func ParseMetricsInRuleGroup(mir *MetricsInRuler, group rwrulefmt.RuleGroup, ns
continue
}
- parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
+ parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
if n, ok := node.(*parser.VectorSelector); ok {
refMetrics[n.Name] = struct{}{}
}
diff --git a/pkg/mimirtool/commands/rules.go b/pkg/mimirtool/commands/rules.go
index 824c0982864..7690804b11f 100644
--- a/pkg/mimirtool/commands/rules.go
+++ b/pkg/mimirtool/commands/rules.go
@@ -680,7 +680,7 @@ func (r *RuleCommand) prepare(_ *kingpin.ParseContext) error {
}
// Do not apply the aggregation label to excluded rule groups.
- applyTo := func(group rwrulefmt.RuleGroup, rule rulefmt.RuleNode) bool {
+ applyTo := func(group rwrulefmt.RuleGroup, _ rulefmt.RuleNode) bool {
_, excluded := r.aggregationLabelExcludedRuleGroupsList[group.Name]
return !excluded
}
diff --git a/pkg/mimirtool/config/convert_test.go b/pkg/mimirtool/config/convert_test.go
index 34db4cf2d07..52585f8b931 100644
--- a/pkg/mimirtool/config/convert_test.go
+++ b/pkg/mimirtool/config/convert_test.go
@@ -323,7 +323,7 @@ func TestConvert_Cortex(t *testing.T) {
flags: loadFlags(t, tc.inFlagsFile),
}
- assertion := func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) {
+ assertion := func(t *testing.T, outYAML []byte, outFlags []string, _ ConversionNotices, err error) {
assert.NoError(t, err)
assert.ElementsMatch(t, expectedOutFlags, outFlags)
assert.YAMLEq(t, string(expectedOut), string(outYAML))
@@ -396,7 +396,7 @@ func TestConvert_GEM(t *testing.T) {
flags: loadFlags(t, tc.inFlagsFile),
}
- testConvertGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) {
+ testConvertGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, _ ConversionNotices, err error) {
assert.NoError(t, err)
assert.ElementsMatch(t, expectedOutFlags, outFlags)
assert.YAMLEq(t, string(expectedOut), string(outYAML))
@@ -434,7 +434,7 @@ func TestConvert_InvalidConfigs(t *testing.T) {
yaml: loadFile(t, tc.inFile),
dontLoadCommonOpts: true,
}
- testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) {
+ testConvertCortexAndGEM(t, in, func(t *testing.T, _ []byte, _ []string, _ ConversionNotices, err error) {
assert.EqualError(t, err, tc.expectedErr)
})
})
@@ -604,7 +604,7 @@ func TestChangedCortexDefaults(t *testing.T) {
yaml: config,
}
- testConvertCortex(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) {
+ testConvertCortex(t, in, func(t *testing.T, _ []byte, _ []string, notices ConversionNotices, err error) {
require.NoError(t, err)
assert.ElementsMatch(t, changedCortexDefaults, notices.ChangedDefaults)
})
@@ -670,7 +670,7 @@ func TestChangedGEMDefaults(t *testing.T) {
yaml: config,
}
- testConvertGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) {
+ testConvertGEM(t, in, func(t *testing.T, _ []byte, _ []string, notices ConversionNotices, err error) {
require.NoError(t, err)
assert.ElementsMatch(t, expectedChangedDefaults, notices.ChangedDefaults)
})
@@ -726,7 +726,7 @@ func TestConvert_UseNewDefaults(t *testing.T) {
useNewDefaults: tc.useNewDefaults,
}
- testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) {
+ testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, _ []string, notices ConversionNotices, err error) {
require.NoError(t, err)
assert.YAMLEq(t, string(tc.expectedYAML), string(outYAML))
@@ -753,7 +753,7 @@ func TestConvert_NotInYAMLIsNotPrinted(t *testing.T) {
outputDefaults: showDefaults,
dontLoadCommonOpts: true,
}
- testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) {
+ testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, _ []string, _ ConversionNotices, err error) {
assert.NoError(t, err)
assert.NotContains(t, string(outYAML), notInYaml)
})
@@ -770,7 +770,7 @@ func TestConvert_PassingOnlyYAMLReturnsOnlyYAML(t *testing.T) {
yaml: inYAML,
}
- testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) {
+ testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, _ ConversionNotices, err error) {
assert.NoError(t, err)
assert.YAMLEq(t, string(expectedOutYAML), string(outYAML))
assert.Empty(t, outFlags)
@@ -785,7 +785,7 @@ func TestConvert_PassingOnlyFlagsReturnsOnlyFlags(t *testing.T) {
flags: inFlags,
}
- testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) {
+ testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, _ ConversionNotices, err error) {
assert.NoError(t, err)
assert.Empty(t, outYAML)
assert.ElementsMatch(t, expectedOutFlags, outFlags)
@@ -809,7 +809,7 @@ func TestRemovedParamsAndFlagsAreCorrect(t *testing.T) {
allCLIFlagsNames := func(p Parameters) map[string]bool {
flags := map[string]bool{}
- assert.NoError(t, p.Walk(func(path string, v Value) error {
+ assert.NoError(t, p.Walk(func(path string, _ Value) error {
flagName, err := p.GetFlag(path)
assert.NoError(t, err)
flags[flagName] = true
diff --git a/pkg/mimirtool/config/cortex.go b/pkg/mimirtool/config/cortex.go
index d259fc9c6ec..0387d0136ad 100644
--- a/pkg/mimirtool/config/cortex.go
+++ b/pkg/mimirtool/config/cortex.go
@@ -405,7 +405,7 @@ func mapRulerAlertmanagerS3URL(dotStoragePath, storagePath string) MapperFunc {
if s3URL.User != nil {
username := s3URL.User.Username()
password, _ := s3URL.User.Password()
- setIfNonEmpty := func(p Parameters, path, val string) error {
+ setIfNonEmpty := func(_ Parameters, path, val string) error {
currentVal, _ := target.GetValue(path)
currentStr := currentVal.AsString()
if val == "" || currentStr != "" {
diff --git a/pkg/mimirtool/config/mapping.go b/pkg/mimirtool/config/mapping.go
index b69aa6533bf..25332c117a1 100644
--- a/pkg/mimirtool/config/mapping.go
+++ b/pkg/mimirtool/config/mapping.go
@@ -69,7 +69,7 @@ func (m MapperFunc) DoMap(source, target Parameters) error {
}
func RenameMapping(to string) Mapping {
- return func(oldPath string, oldVal Value) (newPath string, newVal Value) {
+ return func(_ string, oldVal Value) (newPath string, newVal Value) {
newPath = to
newVal = oldVal
return
diff --git a/pkg/mimirtool/rules/rules.go b/pkg/mimirtool/rules/rules.go
index 7f0b7da1f2f..d2146cd1d52 100644
--- a/pkg/mimirtool/rules/rules.go
+++ b/pkg/mimirtool/rules/rules.go
@@ -157,7 +157,7 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru
// exprNodeInspectorFunc returns a PromQL inspector.
// It modifies most PromQL expressions to include a given label.
func exprNodeInspectorFunc(rule rulefmt.RuleNode, label string) func(node parser.Node, path []parser.Node) error {
- return func(node parser.Node, path []parser.Node) error {
+ return func(node parser.Node, _ []parser.Node) error {
var err error
switch n := node.(type) {
case *parser.AggregateExpr:
diff --git a/pkg/mimirtool/rules/rules_test.go b/pkg/mimirtool/rules/rules_test.go
index 74294602be0..28613490f86 100644
--- a/pkg/mimirtool/rules/rules_test.go
+++ b/pkg/mimirtool/rules/rules_test.go
@@ -181,7 +181,7 @@ func TestAggregateBy(t *testing.T) {
},
},
},
- applyTo: func(group rwrulefmt.RuleGroup, rule rulefmt.RuleNode) bool {
+ applyTo: func(group rwrulefmt.RuleGroup, _ rulefmt.RuleNode) bool {
return group.Name != "CountSkipped"
},
expectedExpr: []string{`count by (namespace, cluster) (test_series) > 1`, `count by (namespace) (test_series) > 1`},
diff --git a/pkg/querier/block_test.go b/pkg/querier/block_test.go
index 483e919012b..5036bd3de67 100644
--- a/pkg/querier/block_test.go
+++ b/pkg/querier/block_test.go
@@ -304,7 +304,7 @@ func TestBlockQuerierSeriesSet(t *testing.T) {
t.Run(fmt.Sprintf("consume with .Next() method, perform .At() after every %dth call to .Next()", callAtEvery), func(t *testing.T) {
t.Parallel()
- advance := func(it chunkenc.Iterator, wantTs int64) chunkenc.ValueType { return it.Next() }
+ advance := func(it chunkenc.Iterator, _ int64) chunkenc.ValueType { return it.Next() }
ss := getSeriesSet()
verifyNextSeries(t, ss, labels.FromStrings("__name__", "first", "a", "a"), 3*time.Millisecond, []timeRange{
diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go
index f9dfa3a3858..1358ccd0925 100644
--- a/pkg/querier/blocks_store_queryable_test.go
+++ b/pkg/querier/blocks_store_queryable_test.go
@@ -1046,7 +1046,7 @@ func TestBlocksStoreQuerier_ShouldReturnContextCanceledIfContextWasCanceledWhile
srv, q := prepareTestCase(t)
- srv.onSeries = func(req *storepb.SeriesRequest, srv storegatewaypb.StoreGateway_SeriesServer) error {
+ srv.onSeries = func(*storepb.SeriesRequest, storegatewaypb.StoreGateway_SeriesServer) error {
if numExecutions.Inc() == 1 {
close(waitExecution)
<-continueExecution
@@ -1082,7 +1082,7 @@ func TestBlocksStoreQuerier_ShouldReturnContextCanceledIfContextWasCanceledWhile
srv, q := prepareTestCase(t)
- srv.onLabelNames = func(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) {
+ srv.onLabelNames = func(context.Context, *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) {
if numExecutions.Inc() == 1 {
close(waitExecution)
<-continueExecution
@@ -1117,7 +1117,7 @@ func TestBlocksStoreQuerier_ShouldReturnContextCanceledIfContextWasCanceledWhile
srv, q := prepareTestCase(t)
- srv.onLabelValues = func(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) {
+ srv.onLabelValues = func(context.Context, *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) {
if numExecutions.Inc() == 1 {
close(waitExecution)
<-continueExecution
diff --git a/pkg/querier/blocks_store_replicated_set_test.go b/pkg/querier/blocks_store_replicated_set_test.go
index c69f79a381b..5b6ec13ab09 100644
--- a/pkg/querier/blocks_store_replicated_set_test.go
+++ b/pkg/querier/blocks_store_replicated_set_test.go
@@ -322,7 +322,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor(t *testing.T) {
ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil)
t.Cleanup(func() { assert.NoError(t, closer.Close()) })
- require.NoError(t, ringStore.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) {
+ require.NoError(t, ringStore.CAS(ctx, "test", func(interface{}) (interface{}, bool, error) {
d := ring.NewDesc()
testData.setup(d)
return d, true, nil
@@ -389,7 +389,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor_ShouldSupportRandomLoadBalancin
ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil)
t.Cleanup(func() { assert.NoError(t, closer.Close()) })
- require.NoError(t, ringStore.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) {
+ require.NoError(t, ringStore.CAS(ctx, "test", func(interface{}) (interface{}, bool, error) {
d := ring.NewDesc()
for n := 1; n <= numInstances; n++ {
d.AddIngester(fmt.Sprintf("instance-%d", n), fmt.Sprintf("127.0.0.%d", n), "", []uint32{uint32(n)}, ring.ACTIVE, registeredAt)
diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go
index b4e2d85ad57..df95d36e0e7 100644
--- a/pkg/querier/querier_test.go
+++ b/pkg/querier/querier_test.go
@@ -1284,10 +1284,10 @@ func TestConfig_ValidateLimits(t *testing.T) {
expected error
}{
"should pass with default config": {
- setup: func(cfg *Config, limits *validation.Limits) {},
+ setup: func(*Config, *validation.Limits) {},
},
"should pass if 'query store after' is enabled and shuffle-sharding is disabled": {
- setup: func(cfg *Config, limits *validation.Limits) {
+ setup: func(cfg *Config, _ *validation.Limits) {
cfg.QueryStoreAfter = time.Hour
},
},
diff --git a/pkg/querier/remote_read_test.go b/pkg/querier/remote_read_test.go
index 8379ffd5c9d..1f071f61618 100644
--- a/pkg/querier/remote_read_test.go
+++ b/pkg/querier/remote_read_test.go
@@ -102,7 +102,7 @@ func (p *partiallyFailingSeriesSet) Warnings() annotations.Annotations {
func TestSampledRemoteRead(t *testing.T) {
q := &mockSampleAndChunkQueryable{
- queryableFn: func(mint, maxt int64) (storage.Querier, error) {
+ queryableFn: func(int64, int64) (storage.Querier, error) {
return mockQuerier{
seriesSet: series.NewConcreteSeriesSetFromUnsortedSeries([]storage.Series{
series.NewConcreteSeries(
@@ -326,7 +326,7 @@ func TestStreamedRemoteRead(t *testing.T) {
for tn, tc := range tcs {
t.Run(tn, func(t *testing.T) {
q := &mockSampleAndChunkQueryable{
- chunkQueryableFn: func(mint, maxt int64) (storage.ChunkQuerier, error) {
+ chunkQueryableFn: func(int64, int64) (storage.ChunkQuerier, error) {
return mockChunkQuerier{
seriesSet: series.NewConcreteSeriesSetFromUnsortedSeries([]storage.Series{
series.NewConcreteSeries(
@@ -519,7 +519,7 @@ func TestRemoteReadErrorParsing(t *testing.T) {
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
q := &mockSampleAndChunkQueryable{
- queryableFn: func(mint, maxt int64) (storage.Querier, error) {
+ queryableFn: func(int64, int64) (storage.Querier, error) {
return mockQuerier{
seriesSet: tc.seriesSet,
}, tc.getQuerierErr
@@ -555,7 +555,7 @@ func TestRemoteReadErrorParsing(t *testing.T) {
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
q := &mockSampleAndChunkQueryable{
- chunkQueryableFn: func(mint, maxt int64) (storage.ChunkQuerier, error) {
+ chunkQueryableFn: func(int64, int64) (storage.ChunkQuerier, error) {
return mockChunkQuerier{
seriesSet: tc.seriesSet,
}, tc.getQuerierErr
diff --git a/pkg/querier/tenantfederation/merge_exemplar_queryable.go b/pkg/querier/tenantfederation/merge_exemplar_queryable.go
index 7436d62e816..51c441c0d35 100644
--- a/pkg/querier/tenantfederation/merge_exemplar_queryable.go
+++ b/pkg/querier/tenantfederation/merge_exemplar_queryable.go
@@ -176,7 +176,7 @@ func (m *mergeExemplarQuerier) Select(start, end int64, matchers ...[]*labels.Ma
// Each task grabs a job object from the slice and stores its results in the corresponding
// index in the results slice. The job handles performing a tenant-specific exemplar query
// and adding a tenant ID label to each of the results.
- run := func(ctx context.Context, idx int) error {
+ run := func(_ context.Context, idx int) error {
job := jobs[idx]
res, err := job.querier.Select(start, end, job.matchers...)
@@ -217,7 +217,7 @@ func filterTenantsAndRewriteMatchers(idLabelName string, ids []string, allMatche
return sliceToSet(ids), allMatchers
}
- outIds := make(map[string]struct{})
+ outIDs := make(map[string]struct{})
outMatchers := make([][]*labels.Matcher, len(allMatchers))
// The ExemplarQuerier.Select method accepts a slice of slices of matchers. The matchers within
@@ -225,13 +225,13 @@ func filterTenantsAndRewriteMatchers(idLabelName string, ids []string, allMatche
// In order to support that, we start with a set of 0 tenant IDs and add any tenant IDs that remain
// after filtering (based on the inner slice of matchers), for each outer slice.
for i, matchers := range allMatchers {
- filteredIds, unrelatedMatchers := filterValuesByMatchers(idLabelName, ids, matchers...)
- for k := range filteredIds {
- outIds[k] = struct{}{}
+ filteredIDs, unrelatedMatchers := filterValuesByMatchers(idLabelName, ids, matchers...)
+ for k := range filteredIDs {
+ outIDs[k] = struct{}{}
}
outMatchers[i] = unrelatedMatchers
}
- return outIds, outMatchers
+ return outIDs, outMatchers
}
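
The loop above ORs together the tenant IDs that survive each inner matcher group, so a tenant is queried if any outer group keeps it. A standalone sketch of that union shape, with a simplified, hypothetical filter standing in for `filterValuesByMatchers`:

```go
package tenantfilter

// filterIDs is a hypothetical stand-in for filterValuesByMatchers: it keeps only
// the IDs allowed by one inner group of matchers (modelled here as an allow-set).
func filterIDs(ids []string, allowed map[string]bool) map[string]struct{} {
	out := map[string]struct{}{}
	for _, id := range ids {
		if allowed[id] {
			out[id] = struct{}{}
		}
	}
	return out
}

// unionFilteredIDs mirrors the production loop: every outer group contributes the
// IDs that remain after its own filtering, and the per-group results are OR-ed.
func unionFilteredIDs(ids []string, groups []map[string]bool) map[string]struct{} {
	outIDs := map[string]struct{}{}
	for _, allowed := range groups {
		for id := range filterIDs(ids, allowed) {
			outIDs[id] = struct{}{}
		}
	}
	return outIDs
}
```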
diff --git a/pkg/querier/tenantfederation/merge_queryable.go b/pkg/querier/tenantfederation/merge_queryable.go
index 285b5f602d8..59b185da381 100644
--- a/pkg/querier/tenantfederation/merge_queryable.go
+++ b/pkg/querier/tenantfederation/merge_queryable.go
@@ -9,6 +9,7 @@ import (
"context"
"sort"
"strings"
+ "time"
"github.com/go-kit/log"
"github.com/grafana/dskit/concurrency"
@@ -118,17 +119,27 @@ func NewMergeQueryable(idLabelName string, callbacks MergeQueryableCallbacks, re
Help: "Number of tenants queried for a single standard query.",
Buckets: []float64{1, 2, 4, 8, 16, 32},
}),
+
+ // Experimental: Observe time to kick off upstream query jobs as a native histogram
+ upstreamQueryWaitDuration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
+ Name: "cortex_querier_federation_upstream_query_wait_duration_seconds",
+ Help: "Time spent waiting to run upstream queries",
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ }),
}
}
type mergeQueryable struct {
- logger log.Logger
- idLabelName string
- bypassWithSingleID bool
- callbacks MergeQueryableCallbacks
- resolver tenant.Resolver
- maxConcurrency int
- tenantsQueried prometheus.Histogram
+ logger log.Logger
+ idLabelName string
+ bypassWithSingleID bool
+ callbacks MergeQueryableCallbacks
+ resolver tenant.Resolver
+ maxConcurrency int
+ tenantsQueried prometheus.Histogram
+ upstreamQueryWaitDuration prometheus.Histogram
}
// Querier returns a new mergeQuerier, which aggregates results for multiple federation IDs
@@ -139,14 +150,15 @@ func (m *mergeQueryable) Querier(mint int64, maxt int64) (storage.Querier, error
return nil, err
}
return &mergeQuerier{
- logger: m.logger,
- idLabelName: m.idLabelName,
- callbacks: m.callbacks,
- resolver: m.resolver,
- upstream: upstream,
- maxConcurrency: m.maxConcurrency,
- bypassWithSingleID: m.bypassWithSingleID,
- tenantsQueried: m.tenantsQueried,
+ logger: m.logger,
+ idLabelName: m.idLabelName,
+ callbacks: m.callbacks,
+ resolver: m.resolver,
+ upstream: upstream,
+ maxConcurrency: m.maxConcurrency,
+ bypassWithSingleID: m.bypassWithSingleID,
+ tenantsQueried: m.tenantsQueried,
+ upstreamQueryWaitDuration: m.upstreamQueryWaitDuration,
}, nil
}
@@ -156,14 +168,15 @@ func (m *mergeQueryable) Querier(mint int64, maxt int64) (storage.Querier, error
// the previous value is exposed through a new label prefixed with "original_".
// This behaviour is not implemented recursively
type mergeQuerier struct {
- logger log.Logger
- callbacks MergeQueryableCallbacks
- resolver tenant.Resolver
- upstream MergeQuerierUpstream
- idLabelName string
- maxConcurrency int
- bypassWithSingleID bool
- tenantsQueried prometheus.Histogram
+ logger log.Logger
+ callbacks MergeQueryableCallbacks
+ resolver tenant.Resolver
+ upstream MergeQuerierUpstream
+ idLabelName string
+ maxConcurrency int
+ bypassWithSingleID bool
+ tenantsQueried prometheus.Histogram
+ upstreamQueryWaitDuration prometheus.Histogram
}
// LabelValues returns all potential values for a label name given involved federation IDs.
@@ -322,6 +335,7 @@ func (m *mergeQuerier) Close() error {
// If the `idLabelName` is matched on, it only considers matching IDs.
// The forwarded labelSelector does not contain those that operate on `idLabelName`.
func (m *mergeQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+ start := time.Now()
ids, err := m.resolver.TenantIDs(ctx)
if err != nil {
return storage.ErrSeriesSet(err)
@@ -346,6 +360,7 @@ func (m *mergeQuerier) Select(ctx context.Context, sortSeries bool, hints *stora
// We don't use the context passed to this function, since the context has to live longer
// than the call to ForEachJob (i.e. as long as seriesSets)
run := func(_ context.Context, idx int) error {
+ m.upstreamQueryWaitDuration.Observe(time.Since(start).Seconds())
id := jobs[idx]
seriesSets[idx] = &addLabelsSeriesSet{
upstream: m.upstream.Select(ctx, id, sortSeries, hints, filteredMatchers...),
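
The `upstreamQueryWaitDuration` metric added above is a client_golang native histogram. A minimal, self-contained sketch of the same register-and-observe pattern (the metric name and registry here are illustrative, not the actual Mimir wiring):

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// newWaitDurationHistogram registers a native histogram: bucket boundaries grow by
// ~10% per bucket (factor 1.1), at most 100 sparse buckets are kept, and they are
// reset at most once per hour to bound memory usage.
func newWaitDurationHistogram(reg prometheus.Registerer) prometheus.Histogram {
	return promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
		Name:                            "example_upstream_query_wait_duration_seconds",
		Help:                            "Time spent waiting to run upstream queries.",
		NativeHistogramBucketFactor:     1.1,
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: 1 * time.Hour,
	})
}

func main() {
	reg := prometheus.NewRegistry()
	h := newWaitDurationHistogram(reg)

	// Observe the time elapsed between scheduling work and actually starting it,
	// mirroring how Select() records the wait before each per-tenant job runs.
	start := time.Now()
	time.Sleep(10 * time.Millisecond) // stand-in for the scheduling delay
	h.Observe(time.Since(start).Seconds())
}
```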
diff --git a/pkg/querier/worker/frontend_processor_test.go b/pkg/querier/worker/frontend_processor_test.go
index daec7e8e102..aff6fddc212 100644
--- a/pkg/querier/worker/frontend_processor_test.go
+++ b/pkg/querier/worker/frontend_processor_test.go
@@ -72,12 +72,12 @@ func TestFrontendProcessor_processQueriesOnSingleStream(t *testing.T) {
workerCtx, workerCancel := context.WithCancel(context.Background())
- requestHandler.On("Handle", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
+ requestHandler.On("Handle", mock.Anything, mock.Anything).Run(func(mock.Arguments) {
// Cancel the worker context while the query execution is in progress.
workerCancel()
// Ensure the execution context hasn't been canceled yet.
- require.Nil(t, processClient.Context().Err())
+ require.NoError(t, processClient.Context().Err())
// Intentionally slow down the query execution, to double check the worker waits until done.
time.Sleep(time.Second)
diff --git a/pkg/querier/worker/scheduler_processor_test.go b/pkg/querier/worker/scheduler_processor_test.go
index 898fe59bdff..18291bea3d1 100644
--- a/pkg/querier/worker/scheduler_processor_test.go
+++ b/pkg/querier/worker/scheduler_processor_test.go
@@ -83,7 +83,7 @@ func TestSchedulerProcessor_processQueriesOnSingleStream(t *testing.T) {
workerCtx, workerCancel := context.WithCancel(context.Background())
- requestHandler.On("Handle", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
+ requestHandler.On("Handle", mock.Anything, mock.Anything).Run(func(mock.Arguments) {
// Cancel the worker context while the query execution is in progress.
workerCancel()
@@ -405,7 +405,7 @@ func TestSchedulerProcessor_ResponseStream(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
requestHandler.On("Handle", mock.Anything, mock.Anything).Run(
- func(arguments mock.Arguments) { cancel() },
+ func(mock.Arguments) { cancel() },
).Return(returnResponses(responses)())
reqProcessor.processQueriesOnSingleStream(ctx, nil, "127.0.0.1")
diff --git a/pkg/querier/worker/worker_test.go b/pkg/querier/worker/worker_test.go
index 993637ebd92..5465851c9b0 100644
--- a/pkg/querier/worker/worker_test.go
+++ b/pkg/querier/worker/worker_test.go
@@ -29,7 +29,7 @@ func TestConfig_Validate(t *testing.T) {
expectedErr string
}{
"should pass with default config": {
- setup: func(cfg *Config) {},
+ setup: func(*Config) {},
},
"should pass if frontend address is configured, but not scheduler address": {
setup: func(cfg *Config) {
diff --git a/pkg/ruler/api_test.go b/pkg/ruler/api_test.go
index 8f5b1df3a5b..481f3cfaaa8 100644
--- a/pkg/ruler/api_test.go
+++ b/pkg/ruler/api_test.go
@@ -298,7 +298,7 @@ func TestRuler_PrometheusRules(t *testing.T) {
},
},
expectedConfigured: 1,
- limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) {
+ limits: validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) {
tenantLimits[userID] = validation.MockDefaultLimits()
tenantLimits[userID].RulerRecordingRulesEvaluationEnabled = true
tenantLimits[userID].RulerAlertingRulesEvaluationEnabled = false
@@ -330,7 +330,7 @@ func TestRuler_PrometheusRules(t *testing.T) {
},
},
expectedConfigured: 1,
- limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) {
+ limits: validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) {
tenantLimits[userID] = validation.MockDefaultLimits()
tenantLimits[userID].RulerRecordingRulesEvaluationEnabled = false
tenantLimits[userID].RulerAlertingRulesEvaluationEnabled = true
@@ -364,7 +364,7 @@ func TestRuler_PrometheusRules(t *testing.T) {
},
},
expectedConfigured: 0,
- limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) {
+ limits: validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) {
tenantLimits[userID] = validation.MockDefaultLimits()
tenantLimits[userID].RulerRecordingRulesEvaluationEnabled = false
tenantLimits[userID].RulerAlertingRulesEvaluationEnabled = false
diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go
index c51bd33a41c..539be45fcb7 100644
--- a/pkg/ruler/compat_test.go
+++ b/pkg/ruler/compat_test.go
@@ -379,7 +379,7 @@ func TestMetricsQueryFuncErrors(t *testing.T) {
queries := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
failures := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
- mockFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) {
+ mockFunc := func(context.Context, string, time.Time) (promql.Vector, error) {
return promql.Vector{}, tc.returnedError
}
qf := MetricsQueryFunc(mockFunc, queries, failures, tc.remoteQuerier)
@@ -397,7 +397,7 @@ func TestRecordAndReportRuleQueryMetrics(t *testing.T) {
queryTime := promauto.With(nil).NewCounterVec(prometheus.CounterOpts{}, []string{"user"})
zeroFetchedSeriesCount := promauto.With(nil).NewCounterVec(prometheus.CounterOpts{}, []string{"user"})
- mockFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) {
+ mockFunc := func(context.Context, string, time.Time) (promql.Vector, error) {
time.Sleep(1 * time.Second)
return promql.Vector{}, nil
}
diff --git a/pkg/ruler/remotequerier_test.go b/pkg/ruler/remotequerier_test.go
index 37decb97d9a..d01802013a0 100644
--- a/pkg/ruler/remotequerier_test.go
+++ b/pkg/ruler/remotequerier_test.go
@@ -42,7 +42,7 @@ func TestRemoteQuerier_Read(t *testing.T) {
setup := func() (mockHTTPGRPCClient, *httpgrpc.HTTPRequest) {
var inReq httpgrpc.HTTPRequest
- mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+ mockClientFn := func(_ context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
inReq = *req
b, err := proto.Marshal(&prompb.ReadResponse{
@@ -97,7 +97,7 @@ func TestRemoteQuerier_Read(t *testing.T) {
}
func TestRemoteQuerier_ReadReqTimeout(t *testing.T) {
- mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+ mockClientFn := func(ctx context.Context, _ *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
<-ctx.Done()
return nil, ctx.Err()
}
@@ -117,7 +117,7 @@ func TestRemoteQuerier_Query(t *testing.T) {
setup := func() (mockHTTPGRPCClient, *httpgrpc.HTTPRequest) {
var inReq httpgrpc.HTTPRequest
- mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+ mockClientFn := func(_ context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
inReq = *req
return &httpgrpc.HTTPResponse{
@@ -266,7 +266,7 @@ func TestRemoteQuerier_QueryRetryOnFailure(t *testing.T) {
var count atomic.Int64
ctx, cancel := context.WithCancel(context.Background())
- mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+ mockClientFn := func(context.Context, *httpgrpc.HTTPRequest, ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
count.Add(1)
if testCase.err != nil {
if grpcutil.IsCanceled(testCase.err) {
@@ -396,7 +396,7 @@ func TestRemoteQuerier_QueryJSONDecoding(t *testing.T) {
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
- mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+ mockClientFn := func(context.Context, *httpgrpc.HTTPRequest, ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
return &httpgrpc.HTTPResponse{
Code: http.StatusOK,
Headers: []*httpgrpc.Header{
@@ -664,7 +664,7 @@ func TestRemoteQuerier_QueryProtobufDecoding(t *testing.T) {
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
- mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+ mockClientFn := func(context.Context, *httpgrpc.HTTPRequest, ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
b, err := scenario.body.Marshal()
if err != nil {
return nil, err
@@ -692,7 +692,7 @@ func TestRemoteQuerier_QueryProtobufDecoding(t *testing.T) {
}
func TestRemoteQuerier_QueryUnknownResponseContentType(t *testing.T) {
- mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+ mockClientFn := func(context.Context, *httpgrpc.HTTPRequest, ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
return &httpgrpc.HTTPResponse{
Code: http.StatusOK,
Headers: []*httpgrpc.Header{
@@ -709,7 +709,7 @@ func TestRemoteQuerier_QueryUnknownResponseContentType(t *testing.T) {
}
func TestRemoteQuerier_QueryReqTimeout(t *testing.T) {
- mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+ mockClientFn := func(ctx context.Context, _ *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
<-ctx.Done()
return nil, ctx.Err()
}
@@ -767,7 +767,7 @@ func TestRemoteQuerier_StatusErrorResponses(t *testing.T) {
}
for testName, testCase := range testCases {
t.Run(testName, func(t *testing.T) {
- mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+ mockClientFn := func(context.Context, *httpgrpc.HTTPRequest, ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
return testCase.resp, testCase.err
}
logger := newLoggerWithCounter()
diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go
index 1fb9d210949..cd3384c353b 100644
--- a/pkg/ruler/ruler.go
+++ b/pkg/ruler/ruler.go
@@ -1290,7 +1290,7 @@ func (r *Ruler) notifySyncRules(ctx context.Context, userIDs []string) {
// the client-side gRPC instrumentation fails.
ctx = user.InjectOrgID(ctx, "")
- errs.Add(r.forEachRulerInTheRing(ctx, r.ring, RuleSyncRingOp, func(ctx context.Context, inst *ring.InstanceDesc, rulerClient RulerClient, rulerClientErr error) error {
+ errs.Add(r.forEachRulerInTheRing(ctx, r.ring, RuleSyncRingOp, func(ctx context.Context, _ *ring.InstanceDesc, rulerClient RulerClient, rulerClientErr error) error {
var err error
if rulerClientErr != nil {
diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go
index a4291006bda..f848a4ce05f 100644
--- a/pkg/ruler/ruler_test.go
+++ b/pkg/ruler/ruler_test.go
@@ -224,10 +224,10 @@ func prepareRuler(t *testing.T, cfg Config, storage rulestore.RuleStore, opts ..
func prepareRulerManager(t *testing.T, cfg Config, opts ...prepareOption) *DefaultMultiTenantManager {
options := applyPrepareOptions(t, cfg.Ring.Common.InstanceID, opts...)
- noopQueryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) {
+ noopQueryable := storage.QueryableFunc(func(int64, int64) (storage.Querier, error) {
return storage.NoopQuerier(), nil
})
- noopQueryFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) {
+ noopQueryFunc := func(context.Context, string, time.Time) (promql.Vector, error) {
return nil, nil
}
@@ -249,7 +249,7 @@ func TestNotifierSendsUserIDHeader(t *testing.T) {
// We do expect 1 API call for the user create with the getOrCreateNotifier()
wg.Add(1)
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ts := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r)
require.NoError(t, err)
assert.Equal(t, "1", userID)
@@ -1019,7 +1019,7 @@ func TestRuler_NotifySyncRulesAsync_ShouldTriggerRulesSyncingOnAllRulersWhenEnab
rulerCfg.Ring.Common.InstanceAddr = rulerAddr
rulerCfg.Ring.Common.KVStore = kv.Config{Mock: kvStore}
- limits := validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) {
+ limits := validation.MockOverrides(func(defaults *validation.Limits, _ map[string]*validation.Limits) {
defaults.RulerTenantShardSize = rulerShardSize
})
@@ -1674,7 +1674,7 @@ func TestFilterRuleGroupsByEnabled(t *testing.T) {
createRuleGroup("group-3", "user-2", createAlertingRule("alert-6", "6"), createAlertingRule("alert-7", "7")),
},
},
- limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) {
+ limits: validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) {
tenantLimits["user-1"] = validation.MockDefaultLimits()
tenantLimits["user-1"].RulerRecordingRulesEvaluationEnabled = true
tenantLimits["user-1"].RulerAlertingRulesEvaluationEnabled = false
@@ -1704,7 +1704,7 @@ func TestFilterRuleGroupsByEnabled(t *testing.T) {
createRuleGroup("group-3", "user-2", createAlertingRule("alert-6", "6"), createAlertingRule("alert-7", "7")),
},
},
- limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) {
+ limits: validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) {
tenantLimits["user-1"] = validation.MockDefaultLimits()
tenantLimits["user-1"].RulerRecordingRulesEvaluationEnabled = false
tenantLimits["user-1"].RulerAlertingRulesEvaluationEnabled = true
@@ -1734,7 +1734,7 @@ func TestFilterRuleGroupsByEnabled(t *testing.T) {
createRuleGroup("group-3", "user-2", createAlertingRule("alert-6", "6"), createAlertingRule("alert-7", "7")),
},
},
- limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) {
+ limits: validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) {
tenantLimits["user-1"] = validation.MockDefaultLimits()
tenantLimits["user-1"].RulerRecordingRulesEvaluationEnabled = false
tenantLimits["user-1"].RulerAlertingRulesEvaluationEnabled = false
@@ -1760,7 +1760,7 @@ func TestFilterRuleGroupsByEnabled(t *testing.T) {
createRuleGroup("group-3", "user-2", createAlertingRule("alert-6", "6"), createAlertingRule("alert-7", "7")),
},
},
- limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) {
+ limits: validation.MockOverrides(func(defaults *validation.Limits, _ map[string]*validation.Limits) {
defaults.RulerRecordingRulesEvaluationEnabled = false
defaults.RulerAlertingRulesEvaluationEnabled = false
}),
@@ -1909,17 +1909,17 @@ func BenchmarkFilterRuleGroupsByEnabled(b *testing.B) {
limits: validation.MockDefaultOverrides(),
},
"recording rules disabled": {
- limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) {
+ limits: validation.MockOverrides(func(defaults *validation.Limits, _ map[string]*validation.Limits) {
defaults.RulerRecordingRulesEvaluationEnabled = false
}),
},
"alerting rules disabled": {
- limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) {
+ limits: validation.MockOverrides(func(defaults *validation.Limits, _ map[string]*validation.Limits) {
defaults.RulerAlertingRulesEvaluationEnabled = false
}),
},
"all rules disabled": {
- limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) {
+ limits: validation.MockOverrides(func(defaults *validation.Limits, _ map[string]*validation.Limits) {
defaults.RulerRecordingRulesEvaluationEnabled = false
defaults.RulerAlertingRulesEvaluationEnabled = false
}),
diff --git a/pkg/ruler/rulestore/config_test.go b/pkg/ruler/rulestore/config_test.go
index cf46d5f44b6..db6bee19317 100644
--- a/pkg/ruler/rulestore/config_test.go
+++ b/pkg/ruler/rulestore/config_test.go
@@ -24,7 +24,7 @@ func TestIsDefaults(t *testing.T) {
expected: true,
},
"should return false if the config contains zero values": {
- setup: func(cfg *Config) {},
+ setup: func(*Config) {},
expected: false,
},
"should return false if the config contains default values and some overrides": {
diff --git a/pkg/scheduler/schedulerdiscovery/config_test.go b/pkg/scheduler/schedulerdiscovery/config_test.go
index 5681bff9458..a7c3935244c 100644
--- a/pkg/scheduler/schedulerdiscovery/config_test.go
+++ b/pkg/scheduler/schedulerdiscovery/config_test.go
@@ -16,7 +16,7 @@ func TestConfig_Validate(t *testing.T) {
expectedErr string
}{
"should pass with default config": {
- setup: func(cfg *Config) {},
+ setup: func(*Config) {},
},
"should fail if service discovery mode is invalid": {
setup: func(cfg *Config) {
diff --git a/pkg/storage/bucket/s3/bucket_client.go b/pkg/storage/bucket/s3/bucket_client.go
index cdd60fdeb09..e304bb5d7ad 100644
--- a/pkg/storage/bucket/s3/bucket_client.go
+++ b/pkg/storage/bucket/s3/bucket_client.go
@@ -60,6 +60,7 @@ func newS3Config(cfg Config) (s3.Config, error) {
SendContentMd5: cfg.SendContentMd5,
SSEConfig: sseCfg,
ListObjectsVersion: cfg.ListObjectsVersion,
+ BucketLookupType: cfg.BucketLookupType,
AWSSDKAuth: cfg.NativeAWSAuthEnabled,
PartSize: cfg.PartSize,
HTTPConfig: s3.HTTPConfig{
diff --git a/pkg/storage/bucket/s3/config.go b/pkg/storage/bucket/s3/config.go
index b4b5631eac3..bbeb0379d08 100644
--- a/pkg/storage/bucket/s3/config.go
+++ b/pkg/storage/bucket/s3/config.go
@@ -10,6 +10,7 @@ import (
"flag"
"fmt"
"net/http"
+ "slices"
"strings"
"time"
@@ -36,9 +37,11 @@ const (
)
var (
- supportedSignatureVersions = []string{SignatureVersionV4, SignatureVersionV2}
- supportedSSETypes = []string{SSEKMS, SSES3}
- supportedStorageClasses = s3_service.ObjectStorageClass_Values()
+ supportedSignatureVersions = []string{SignatureVersionV4, SignatureVersionV2}
+ supportedSSETypes = []string{SSEKMS, SSES3}
+ supportedStorageClasses = s3_service.ObjectStorageClass_Values()
+ supportedBucketLookupTypes = thanosS3BucketLookupTypesValues()
+
errUnsupportedSignatureVersion = fmt.Errorf("unsupported signature version (supported values: %s)", strings.Join(supportedSignatureVersions, ", "))
errUnsupportedSSEType = errors.New("unsupported S3 SSE type")
errUnsupportedStorageClass = fmt.Errorf("unsupported S3 storage class (supported values: %s)", strings.Join(supportedStorageClasses, ", "))
@@ -47,6 +50,21 @@ var (
errInvalidSTSEndpoint = errors.New("sts-endpoint must be a valid url")
)
+var thanosS3BucketLookupTypes = map[string]s3.BucketLookupType{
+ s3.AutoLookup.String(): s3.AutoLookup,
+ s3.VirtualHostLookup.String(): s3.VirtualHostLookup,
+ s3.PathLookup.String(): s3.PathLookup,
+}
+
+func thanosS3BucketLookupTypesValues() (list []string) {
+ for k := range thanosS3BucketLookupTypes {
+ list = append(list, k)
+ }
+ // sort the list for consistent output in help, where it's used
+ slices.Sort(list)
+ return list
+}
+
// HTTPConfig stores the http.Transport configuration for the s3 minio client.
type HTTPConfig struct {
IdleConnTimeout time.Duration `yaml:"idle_conn_timeout" category:"advanced"`
@@ -76,19 +94,20 @@ func (cfg *HTTPConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
// Config holds the config options for an S3 backend
type Config struct {
- Endpoint string `yaml:"endpoint"`
- Region string `yaml:"region"`
- BucketName string `yaml:"bucket_name"`
- SecretAccessKey flagext.Secret `yaml:"secret_access_key"`
- AccessKeyID string `yaml:"access_key_id"`
- Insecure bool `yaml:"insecure" category:"advanced"`
- SignatureVersion string `yaml:"signature_version" category:"advanced"`
- ListObjectsVersion string `yaml:"list_objects_version" category:"advanced"`
- StorageClass string `yaml:"storage_class" category:"experimental"`
- NativeAWSAuthEnabled bool `yaml:"native_aws_auth_enabled" category:"experimental"`
- PartSize uint64 `yaml:"part_size" category:"experimental"`
- SendContentMd5 bool `yaml:"send_content_md5" category:"experimental"`
- STSEndpoint string `yaml:"sts_endpoint"`
+ Endpoint string `yaml:"endpoint"`
+ Region string `yaml:"region"`
+ BucketName string `yaml:"bucket_name"`
+ SecretAccessKey flagext.Secret `yaml:"secret_access_key"`
+ AccessKeyID string `yaml:"access_key_id"`
+ Insecure bool `yaml:"insecure" category:"advanced"`
+ SignatureVersion string `yaml:"signature_version" category:"advanced"`
+ ListObjectsVersion string `yaml:"list_objects_version" category:"advanced"`
+ BucketLookupType s3.BucketLookupType `yaml:"bucket_lookup_type" category:"advanced"`
+ StorageClass string `yaml:"storage_class" category:"experimental"`
+ NativeAWSAuthEnabled bool `yaml:"native_aws_auth_enabled" category:"experimental"`
+ PartSize uint64 `yaml:"part_size" category:"experimental"`
+ SendContentMd5 bool `yaml:"send_content_md5" category:"experimental"`
+ STSEndpoint string `yaml:"sts_endpoint"`
SSE SSEConfig `yaml:"sse"`
HTTP HTTPConfig `yaml:"http"`
@@ -113,6 +132,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.BoolVar(&cfg.NativeAWSAuthEnabled, prefix+"s3.native-aws-auth-enabled", false, "If enabled, it will use the default authentication methods of the AWS SDK for go based on known environment variables and known AWS config files.")
f.Uint64Var(&cfg.PartSize, prefix+"s3.part-size", 0, "The minimum file size in bytes used for multipart uploads. If 0, the value is optimally computed for each object.")
f.BoolVar(&cfg.SendContentMd5, prefix+"s3.send-content-md5", false, "If enabled, a Content-MD5 header is sent with S3 Put Object requests. Consumes more resources to compute the MD5, but may improve compatibility with object storage services that do not support checksums.")
+ f.Var(newBucketLookupTypeValue(s3.AutoLookup, &cfg.BucketLookupType), prefix+"s3.bucket-lookup-type", fmt.Sprintf("Bucket lookup style type, used to access the bucket in an S3-compatible service. Default is auto. Supported values are: %s.", strings.Join(supportedBucketLookupTypes, ", ")))
f.StringVar(&cfg.STSEndpoint, prefix+"s3.sts-endpoint", "", "Accessing S3 resources using temporary, secure credentials provided by AWS Security Token Service.")
cfg.SSE.RegisterFlagsWithPrefix(prefix+"s3.sse.", f)
cfg.HTTP.RegisterFlagsWithPrefix(prefix, f)
@@ -227,3 +247,27 @@ func parseKMSEncryptionContext(data string) (map[string]string, error) {
err := errors.Wrap(json.Unmarshal([]byte(data), &decoded), "unable to parse KMS encryption context")
return decoded, err
}
+
+// bucketLookupTypeValue is an adapter between s3.BucketLookupType and flag.Value.
+type bucketLookupTypeValue s3.BucketLookupType
+
+func newBucketLookupTypeValue(value s3.BucketLookupType, p *s3.BucketLookupType) *bucketLookupTypeValue {
+ *p = value
+ return (*bucketLookupTypeValue)(p)
+}
+
+func (v *bucketLookupTypeValue) String() string {
+ if v == nil {
+ return s3.AutoLookup.String()
+ }
+ return s3.BucketLookupType(*v).String()
+}
+
+func (v *bucketLookupTypeValue) Set(s string) error {
+ t, ok := thanosS3BucketLookupTypes[s]
+ if !ok {
+ return fmt.Errorf("unsupported bucket lookup type: %s", s)
+ }
+ *v = bucketLookupTypeValue(t)
+ return nil
+}
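
The `bucketLookupTypeValue` adapter above is the usual `flag.Value` pattern: the constructor seeds the default into the destination, and the returned pointer validates input in `Set`. A standalone sketch of the same pattern with a hypothetical string-backed type (stdlib only, not the Thanos `s3.BucketLookupType`):

```go
package main

import (
	"flag"
	"fmt"
)

// lookupStyle is a hypothetical stand-in for an enum-like type such as s3.BucketLookupType.
type lookupStyle string

const (
	autoLookup lookupStyle = "auto"
	pathLookup lookupStyle = "path-style"
)

// lookupStyleValue adapts lookupStyle to the flag.Value interface.
type lookupStyleValue lookupStyle

// newLookupStyleValue writes the default into the destination, so the flag holds a
// valid value even when it is never set on the command line.
func newLookupStyleValue(def lookupStyle, p *lookupStyle) *lookupStyleValue {
	*p = def
	return (*lookupStyleValue)(p)
}

func (v *lookupStyleValue) String() string {
	if v == nil {
		return string(autoLookup)
	}
	return string(*v)
}

func (v *lookupStyleValue) Set(s string) error {
	switch lookupStyle(s) {
	case autoLookup, pathLookup:
		*v = lookupStyleValue(s)
		return nil
	default:
		return fmt.Errorf("unsupported lookup style: %s", s)
	}
}

func main() {
	var style lookupStyle
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	fs.Var(newLookupStyleValue(autoLookup, &style), "bucket-lookup-type", "Bucket lookup style.")

	_ = fs.Parse([]string{"-bucket-lookup-type", "path-style"})
	fmt.Println(style) // path-style
}
```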
diff --git a/pkg/storage/ingest/DESIGN.md b/pkg/storage/ingest/DESIGN.md
index 5d14104d48b..97fcc4b2497 100644
--- a/pkg/storage/ingest/DESIGN.md
+++ b/pkg/storage/ingest/DESIGN.md
@@ -16,3 +16,22 @@ This has been verified both testing it in Apache Kafka 3.5 (Confluent Kafka 7.5)
- Partition contains 1 record: `ListOffsets(timestamp = -1)` returns offset `1`
For this reason, the offset of the last produced record in a partition is `ListOffsets(timestamp = -1) - 1`.
+
+### Partition start offset
+
+The partition start offset is the offset of the **first** record available for consumption in a partition, if the partition contains at least one record,
+or the offset of the **next** record that will be produced if the partition is empty (either because no record has ever been produced yet,
+or because Kafka retention kicked in and deleted all segments).
+The partition start offset can be read from Kafka by issuing a `ListOffsets` request with `timestamp = -2`.
+
+This has been verified by testing it in Apache Kafka 3.6 (Confluent Kafka 7.6). In detail, consider the following sequence of events and how `ListOffsets(timestamp = -2)` behaves:
+
+- No record ever produced: `ListOffsets(timestamp = -2)` returns offset `0`
+- Write 1st record: offset of the written record is `0`
+ - Partition contains 1 record: `ListOffsets(timestamp = -2)` returns offset `0`
+- Write 2nd record: offset of the written record is `1`
+ - Partition contains 2 records: `ListOffsets(timestamp = -2)` returns offset `0`
+- Kafka retention triggers and deletes the segment containing the 1st and 2nd record
+ - Partition contains no records: `ListOffsets(timestamp = -2)` returns offset `2`
+- Write 3rd record: offset of the written record is `2`
+ - Partition contains 1 record: `ListOffsets(timestamp = -2)` returns offset `2`
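
The notes above describe reading offsets via `ListOffsets` with `timestamp = -2` (start offset) and `timestamp = -1` (end offset). A minimal sketch of issuing that request with franz-go's `kmsg` package, mirroring the request shape used by the partition offset reader elsewhere in this diff; it assumes a connected `*kgo.Client` and franz-go's generated `RequestWith` helper, with error-code handling trimmed:

```go
package ingestsketch

import (
	"context"

	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

// fetchOffset returns the offset reported by ListOffsets for one partition.
// timestamp = -2 asks for the partition start offset; timestamp = -1 asks for the
// end offset, i.e. the offset at which the next record will be written (so the
// last produced offset is that value minus 1).
func fetchOffset(ctx context.Context, client *kgo.Client, topic string, partition int32, timestamp int64) (int64, error) {
	partitionReq := kmsg.NewListOffsetsRequestTopicPartition()
	partitionReq.Partition = partition
	partitionReq.Timestamp = timestamp

	topicReq := kmsg.NewListOffsetsRequestTopic()
	topicReq.Topic = topic
	topicReq.Partitions = []kmsg.ListOffsetsRequestTopicPartition{partitionReq}

	req := kmsg.NewListOffsetsRequest()
	req.Topics = []kmsg.ListOffsetsRequestTopic{topicReq}

	resp, err := req.RequestWith(ctx, client)
	if err != nil {
		return 0, err
	}
	return resp.Topics[0].Partitions[0].Offset, nil
}
```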
diff --git a/pkg/storage/ingest/config.go b/pkg/storage/ingest/config.go
index 53c85c7c991..820bd45a899 100644
--- a/pkg/storage/ingest/config.go
+++ b/pkg/storage/ingest/config.go
@@ -5,12 +5,24 @@ package ingest
import (
"errors"
"flag"
+ "fmt"
+ "slices"
+ "strings"
"time"
)
+const (
+ consumeFromLastOffset = "last-offset"
+ consumeFromStart = "start"
+ consumeFromEnd = "end"
+)
+
var (
- ErrMissingKafkaAddress = errors.New("the Kafka address has not been configured")
- ErrMissingKafkaTopic = errors.New("the Kafka topic has not been configured")
+ ErrMissingKafkaAddress = errors.New("the Kafka address has not been configured")
+ ErrMissingKafkaTopic = errors.New("the Kafka topic has not been configured")
+ ErrInvalidConsumePosition = errors.New("the configured consume position is invalid")
+
+ consumeFromPositionOptions = []string{consumeFromLastOffset, consumeFromStart, consumeFromEnd}
)
type Config struct {
@@ -48,6 +60,9 @@ type KafkaConfig struct {
LastProducedOffsetPollInterval time.Duration `yaml:"last_produced_offset_poll_interval"`
LastProducedOffsetRetryTimeout time.Duration `yaml:"last_produced_offset_retry_timeout"`
+
+ ConsumeFromPositionAtStartup string `yaml:"consume_from_position_at_startup"`
+ MaxConsumerLagAtStartup time.Duration `yaml:"max_consumer_lag_at_startup"`
}
func (cfg *KafkaConfig) RegisterFlags(f *flag.FlagSet) {
@@ -63,6 +78,9 @@ func (cfg *KafkaConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet)
f.DurationVar(&cfg.LastProducedOffsetPollInterval, prefix+".last-produced-offset-poll-interval", time.Second, "How frequently to poll the last produced offset, used to enforce strong read consistency.")
f.DurationVar(&cfg.LastProducedOffsetRetryTimeout, prefix+".last-produced-offset-retry-timeout", 10*time.Second, "How long to retry a failed request to get the last produced offset.")
+
+ f.StringVar(&cfg.ConsumeFromPositionAtStartup, prefix+".consume-from-position-at-startup", consumeFromLastOffset, fmt.Sprintf("From which position to start consuming the partition at startup. Supported options: %s.", strings.Join(consumeFromPositionOptions, ", ")))
+ f.DurationVar(&cfg.MaxConsumerLagAtStartup, prefix+".max-consumer-lag-at-startup", 15*time.Second, "The maximum tolerated lag before a consumer is considered to have caught up reading from a partition at startup, becomes ACTIVE in the hash ring and passes the readiness check. Set to 0 to disable waiting for the maximum consumer lag to be honored at startup.")
}
func (cfg *KafkaConfig) Validate() error {
@@ -72,6 +90,9 @@ func (cfg *KafkaConfig) Validate() error {
if cfg.Topic == "" {
return ErrMissingKafkaTopic
}
+ if !slices.Contains(consumeFromPositionOptions, cfg.ConsumeFromPositionAtStartup) {
+ return ErrInvalidConsumePosition
+ }
return nil
}
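
Validation of the new consume-position option is a plain allow-list check, as in the `Validate` change above. A compact standalone restatement of that pattern (option values copied from the diff; the sentinel error name is hypothetical for this sketch):

```go
package ingestcfgsketch

import (
	"errors"
	"fmt"
	"slices"
)

// The three supported values for the consume-from-position-at-startup option,
// as registered in this diff.
var consumeFromPositionOptions = []string{"last-offset", "start", "end"}

// errInvalidConsumePosition is a hypothetical sentinel for this sketch; the real
// code returns ErrInvalidConsumePosition from the ingest package.
var errInvalidConsumePosition = errors.New("the configured consume position is invalid")

// validateConsumePosition mirrors the allow-list check added to KafkaConfig.Validate.
func validateConsumePosition(pos string) error {
	if !slices.Contains(consumeFromPositionOptions, pos) {
		return fmt.Errorf("%w: %q", errInvalidConsumePosition, pos)
	}
	return nil
}
```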
diff --git a/pkg/storage/ingest/config_test.go b/pkg/storage/ingest/config_test.go
index 5f3e5419c5f..09e7998dc35 100644
--- a/pkg/storage/ingest/config_test.go
+++ b/pkg/storage/ingest/config_test.go
@@ -38,6 +38,15 @@ func TestConfig_Validate(t *testing.T) {
cfg.KafkaConfig.Topic = "test"
},
},
+ "should fail if ingest storage is enabled and consume position is invalid": {
+ setup: func(cfg *Config) {
+ cfg.Enabled = true
+ cfg.KafkaConfig.Address = "localhost"
+ cfg.KafkaConfig.Topic = "test"
+ cfg.KafkaConfig.ConsumeFromPositionAtStartup = "middle"
+ },
+ expectedErr: ErrInvalidConsumePosition,
+ },
}
for testName, testData := range tests {
diff --git a/pkg/storage/ingest/partition_offset_reader.go b/pkg/storage/ingest/partition_offset_reader.go
index e97e7af1ec2..861e0720c47 100644
--- a/pkg/storage/ingest/partition_offset_reader.go
+++ b/pkg/storage/ingest/partition_offset_reader.go
@@ -42,9 +42,12 @@ type partitionOffsetReader struct {
nextResultPromise *resultPromise[int64]
// Metrics.
- lastProducedOffsetRequestsTotal prometheus.Counter
- lastProducedOffsetFailuresTotal prometheus.Counter
- lastProducedOffsetLatency prometheus.Summary
+ lastProducedOffsetRequestsTotal prometheus.Counter
+ lastProducedOffsetFailuresTotal prometheus.Counter
+ lastProducedOffsetLatency prometheus.Histogram
+ partitionStartOffsetRequestsTotal prometheus.Counter
+ partitionStartOffsetFailuresTotal prometheus.Counter
+ partitionStartOffsetLatency prometheus.Histogram
}
func newPartitionOffsetReader(client *kgo.Client, topic string, partitionID int32, pollInterval time.Duration, reg prometheus.Registerer, logger log.Logger) *partitionOffsetReader {
@@ -65,13 +68,34 @@ func newPartitionOffsetReader(client *kgo.Client, topic string, partitionID int3
Help: "Total number of failed requests to get the last produced offset.",
ConstLabels: prometheus.Labels{"partition": strconv.Itoa(int(partitionID))},
}),
- lastProducedOffsetLatency: promauto.With(reg).NewSummary(prometheus.SummaryOpts{
- Name: "cortex_ingest_storage_reader_last_produced_offset_request_duration_seconds",
- Help: "The duration of requests to fetch the last produced offset of a given partition.",
+ lastProducedOffsetLatency: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
+ Name: "cortex_ingest_storage_reader_last_produced_offset_request_duration_seconds",
+ Help: "The duration of requests to fetch the last produced offset of a given partition.",
+ ConstLabels: prometheus.Labels{"partition": strconv.Itoa(int(partitionID))},
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ Buckets: prometheus.DefBuckets,
+ }),
+
+ partitionStartOffsetRequestsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Name: "cortex_ingest_storage_reader_partition_start_offset_requests_total",
+ Help: "Total number of requests issued to get the partition start offset.",
+ ConstLabels: prometheus.Labels{"partition": strconv.Itoa(int(partitionID))},
+ }),
+ partitionStartOffsetFailuresTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Name: "cortex_ingest_storage_reader_partition_start_offset_failures_total",
+ Help: "Total number of failed requests to get the partition start offset.",
ConstLabels: prometheus.Labels{"partition": strconv.Itoa(int(partitionID))},
- Objectives: latencySummaryObjectives,
- MaxAge: time.Minute,
- AgeBuckets: 10,
+ }),
+ partitionStartOffsetLatency: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
+ Name: "cortex_ingest_storage_reader_partition_start_offset_request_duration_seconds",
+ Help: "The duration of requests to fetch the start offset of a given partition.",
+ ConstLabels: prometheus.Labels{"partition": strconv.Itoa(int(partitionID))},
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ Buckets: prometheus.DefBuckets,
}),
}
@@ -108,10 +132,10 @@ func (p *partitionOffsetReader) getAndNotifyLastProducedOffset(ctx context.Conte
p.nextResultPromise = newResultPromise[int64]()
p.nextResultPromiseMx.Unlock()
- // We call getLastProducedOffset() even if there are no goroutines waiting on the result in order to get
+ // We call FetchLastProducedOffset() even if there are no goroutines waiting on the result in order to get
// a constant load on the Kafka backend. In other words, the load produced on Kafka by this component is
// constant, regardless the number of received queries with strong consistency enabled.
- offset, err := p.getLastProducedOffset(ctx)
+ offset, err := p.FetchLastProducedOffset(ctx)
if err != nil {
level.Warn(p.logger).Log("msg", "failed to fetch the last produced offset", "err", err)
}
@@ -120,10 +144,10 @@ func (p *partitionOffsetReader) getAndNotifyLastProducedOffset(ctx context.Conte
promise.notify(offset, err)
}
-// getLastProducedOffset fetches and returns the last produced offset for a partition, or -1 if the
-// partition is empty. This function issues a single request, but the Kafka client used under the
+// FetchLastProducedOffset fetches and returns the last produced offset for a partition, or -1 if no record has
+// ever been produced in the partition. This function issues a single request, but the Kafka client used under the
// hood may retry a failed request until the retry timeout is hit.
-func (p *partitionOffsetReader) getLastProducedOffset(ctx context.Context) (_ int64, returnErr error) {
+func (p *partitionOffsetReader) FetchLastProducedOffset(ctx context.Context) (_ int64, returnErr error) {
startTime := time.Now()
p.lastProducedOffsetRequestsTotal.Inc()
@@ -137,10 +161,41 @@ func (p *partitionOffsetReader) getLastProducedOffset(ctx context.Context) (_ in
}
}()
+ offset, err := p.fetchPartitionOffset(ctx, kafkaEndOffset)
+ if err != nil {
+ return 0, err
+ }
+
+ // The offset we get is the offset at which the next message will be written, so to get the last produced offset
+ // we have to subtract 1. See DESIGN.md for more details.
+ return offset - 1, nil
+}
+
+// FetchPartitionStartOffset fetches and returns the start offset for a partition. This function returns 0 if no record has
+// ever been produced in the partition. This function issues a single request, but the Kafka client used under the
+// hood may retry a failed request until the retry timeout is hit.
+func (p *partitionOffsetReader) FetchPartitionStartOffset(ctx context.Context) (_ int64, returnErr error) {
+ startTime := time.Now()
+
+ p.partitionStartOffsetRequestsTotal.Inc()
+ defer func() {
+ // We track the latency even in case of error, so that if the request times out,
+ // the timeout is clearly visible in the latency metric too.
+ p.partitionStartOffsetLatency.Observe(time.Since(startTime).Seconds())
+
+ if returnErr != nil {
+ p.partitionStartOffsetFailuresTotal.Inc()
+ }
+ }()
+
+ return p.fetchPartitionOffset(ctx, kafkaStartOffset)
+}
+
+func (p *partitionOffsetReader) fetchPartitionOffset(ctx context.Context, position int64) (int64, error) {
// Create a custom request to fetch the latest offset of a specific partition.
partitionReq := kmsg.NewListOffsetsRequestTopicPartition()
partitionReq.Partition = p.partitionID
- partitionReq.Timestamp = -1 // -1 means "latest".
+ partitionReq.Timestamp = position
topicReq := kmsg.NewListOffsetsRequestTopic()
topicReq.Topic = p.topic
@@ -186,17 +241,15 @@ func (p *partitionOffsetReader) getLastProducedOffset(ctx context.Context) (_ in
return 0, err
}
- // The offset we get is the offset at which the next message will be written, so to get the last produced offset
- // we have to subtract 1. See DESIGN.md for more details.
- return listRes.Topics[0].Partitions[0].Offset - 1, nil
+ return listRes.Topics[0].Partitions[0].Offset, nil
}
-// FetchLastProducedOffset returns the result of the *next* "last produced offset" request
+// WaitNextFetchLastProducedOffset returns the result of the *next* "last produced offset" request
// that will be issued.
//
// The "last produced offset" is the offset of the last message written to the partition (starting from 0), or -1 if no
// message has been written yet.
-func (p *partitionOffsetReader) FetchLastProducedOffset(ctx context.Context) (int64, error) {
+func (p *partitionOffsetReader) WaitNextFetchLastProducedOffset(ctx context.Context) (int64, error) {
// Get the promise for the result of the next request that will be issued.
p.nextResultPromiseMx.RLock()
promise := p.nextResultPromise
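
The renamed `WaitNextFetchLastProducedOffset` hands every caller the result of the *next* background poll instead of issuing one Kafka request per caller, which keeps the load on Kafka constant. A minimal sketch of that shared-result (promise) pattern; this is not the package's actual `resultPromise` type, just an illustration of the idea:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// promise lets any number of waiters block until a single producer publishes one result.
type promise struct {
	done chan struct{}
	once sync.Once

	value int64
	err   error
}

func newPromise() *promise {
	return &promise{done: make(chan struct{})}
}

// notify publishes the result exactly once and unblocks all waiters.
func (p *promise) notify(value int64, err error) {
	p.once.Do(func() {
		p.value, p.err = value, err
		close(p.done)
	})
}

// wait blocks until the result is published or the caller's context expires.
func (p *promise) wait(ctx context.Context) (int64, error) {
	select {
	case <-ctx.Done():
		return 0, ctx.Err()
	case <-p.done:
		return p.value, p.err
	}
}

func main() {
	p := newPromise()

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All waiters share the result of the single in-flight fetch.
			offset, err := p.wait(context.Background())
			fmt.Println(offset, err)
		}()
	}

	// One background poll publishes the result for everyone.
	p.notify(42, nil)
	wg.Wait()
}
```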
diff --git a/pkg/storage/ingest/partition_offset_reader_test.go b/pkg/storage/ingest/partition_offset_reader_test.go
index 372e29c3051..5518532c834 100644
--- a/pkg/storage/ingest/partition_offset_reader_test.go
+++ b/pkg/storage/ingest/partition_offset_reader_test.go
@@ -16,6 +16,7 @@ import (
promtest "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/twmb/franz-go/pkg/kadm"
"github.com/twmb/franz-go/pkg/kmsg"
"go.uber.org/atomic"
@@ -48,7 +49,7 @@ func TestPartitionOffsetReader(t *testing.T) {
for i := 0; i < 2; i++ {
runAsync(&wg, func() {
- _, err := reader.FetchLastProducedOffset(ctx)
+ _, err := reader.WaitNextFetchLastProducedOffset(ctx)
assert.Equal(t, errPartitionOffsetReaderStopped, err)
})
}
@@ -59,16 +60,15 @@ func TestPartitionOffsetReader(t *testing.T) {
// At the point we expect the waiting goroutines to be unblocked.
wg.Wait()
- // The next call to FetchLastProducedOffset() should return immediately.
- _, err := reader.FetchLastProducedOffset(ctx)
+ // The next call to WaitNextFetchLastProducedOffset() should return immediately.
+ _, err := reader.WaitNextFetchLastProducedOffset(ctx)
assert.Equal(t, errPartitionOffsetReaderStopped, err)
})
}
-func TestPartitionOffsetReader_getLastProducedOffset(t *testing.T) {
+func TestPartitionOffsetReader_FetchLastProducedOffset(t *testing.T) {
const (
numPartitions = 1
- userID = "user-1"
topicName = "test"
partitionID = int32(0)
pollInterval = time.Second
@@ -90,21 +90,21 @@ func TestPartitionOffsetReader_getLastProducedOffset(t *testing.T) {
reader = newPartitionOffsetReader(client, topicName, partitionID, pollInterval, reg, logger)
)
- offset, err := reader.getLastProducedOffset(ctx)
+ offset, err := reader.FetchLastProducedOffset(ctx)
require.NoError(t, err)
assert.Equal(t, int64(-1), offset)
// Write the 1st message.
produceRecord(ctx, t, client, topicName, partitionID, []byte("message 1"))
- offset, err = reader.getLastProducedOffset(ctx)
+ offset, err = reader.FetchLastProducedOffset(ctx)
require.NoError(t, err)
assert.Equal(t, int64(0), offset)
// Write the 2nd message.
produceRecord(ctx, t, client, topicName, partitionID, []byte("message 2"))
- offset, err = reader.getLastProducedOffset(ctx)
+ offset, err = reader.FetchLastProducedOffset(ctx)
require.NoError(t, err)
assert.Equal(t, int64(1), offset)
@@ -141,7 +141,7 @@ func TestPartitionOffsetReader_getLastProducedOffset(t *testing.T) {
expectedOffset := int64(1)
// Slow down the 1st ListOffsets request.
- cluster.ControlKey(int16(kmsg.ListOffsets), func(request kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.ControlKey(int16(kmsg.ListOffsets), func(kmsg.Request) (kmsg.Response, error, bool) {
if firstRequest.CompareAndSwap(true, false) {
close(firstRequestReceived)
time.Sleep(2 * firstRequestTimeout)
@@ -151,20 +151,20 @@ func TestPartitionOffsetReader_getLastProducedOffset(t *testing.T) {
wg := sync.WaitGroup{}
- // Run the 1st getLastProducedOffset() with a timeout which is expected to expire
+ // Run the 1st FetchLastProducedOffset() with a timeout which is expected to expire
// before the request will succeed.
runAsync(&wg, func() {
ctxWithTimeout, cancel := context.WithTimeout(ctx, firstRequestTimeout)
defer cancel()
- _, err := reader.getLastProducedOffset(ctxWithTimeout)
+ _, err := reader.FetchLastProducedOffset(ctxWithTimeout)
require.ErrorIs(t, err, context.DeadlineExceeded)
})
- // Run a 2nd getLastProducedOffset() once the 1st request is received. This request
+ // Run a 2nd FetchLastProducedOffset() once the 1st request is received. This request
// is expected to succeed.
runAsyncAfter(&wg, firstRequestReceived, func() {
- offset, err := reader.getLastProducedOffset(ctx)
+ offset, err := reader.FetchLastProducedOffset(ctx)
require.NoError(t, err)
assert.Equal(t, expectedOffset, offset)
})
@@ -187,14 +187,14 @@ func TestPartitionOffsetReader_getLastProducedOffset(t *testing.T) {
// Make the ListOffsets request failing.
actualTries := atomic.NewInt64(0)
- cluster.ControlKey(int16(kmsg.ListOffsets), func(request kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.ControlKey(int16(kmsg.ListOffsets), func(kmsg.Request) (kmsg.Response, error, bool) {
cluster.KeepControl()
actualTries.Inc()
return nil, errors.New("mocked error"), true
})
startTime := time.Now()
- _, err := reader.getLastProducedOffset(ctx)
+ _, err := reader.FetchLastProducedOffset(ctx)
elapsedTime := time.Since(startTime)
require.Error(t, err)
@@ -208,7 +208,171 @@ func TestPartitionOffsetReader_getLastProducedOffset(t *testing.T) {
})
}
-func TestPartitionOffsetReader_FetchLastProducedOffset(t *testing.T) {
+func TestPartitionOffsetReader_FetchPartitionStartOffset(t *testing.T) {
+ const (
+ numPartitions = 1
+ topicName = "test"
+ partitionID = int32(0)
+ pollInterval = time.Second
+ )
+
+ var (
+ ctx = context.Background()
+ logger = log.NewNopLogger()
+ )
+
+ t.Run("should return the partition start offset", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ _, clusterAddr = testkafka.CreateCluster(t, numPartitions, topicName)
+ kafkaCfg = createTestKafkaConfig(clusterAddr, topicName)
+ client = createTestKafkaClient(t, kafkaCfg)
+ reg = prometheus.NewPedanticRegistry()
+ reader = newPartitionOffsetReader(client, topicName, partitionID, pollInterval, reg, logger)
+ )
+
+ offset, err := reader.FetchPartitionStartOffset(ctx)
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), offset)
+
+ // Write the 1st record.
+ produceRecord(ctx, t, client, topicName, partitionID, []byte("record 1"))
+
+ offset, err = reader.FetchPartitionStartOffset(ctx)
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), offset)
+
+ // Write the 2nd record.
+ produceRecord(ctx, t, client, topicName, partitionID, []byte("record 2"))
+
+ offset, err = reader.FetchPartitionStartOffset(ctx)
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), offset)
+
+ // Delete the 1st record.
+ adminClient := kadm.NewClient(client)
+ advancePartitionStartTo := kadm.Offsets{}
+ advancePartitionStartTo.Add(kadm.Offset{Topic: topicName, Partition: partitionID, At: 1})
+ _, err = adminClient.DeleteRecords(ctx, advancePartitionStartTo)
+ require.NoError(t, err)
+ t.Log("advanced partition start offset to 1")
+
+ offset, err = reader.FetchPartitionStartOffset(ctx)
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), offset)
+
+ assert.NoError(t, promtest.GatherAndCompare(reg, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_partition_start_offset_failures_total Total number of failed requests to get the partition start offset.
+ # TYPE cortex_ingest_storage_reader_partition_start_offset_failures_total counter
+ cortex_ingest_storage_reader_partition_start_offset_failures_total{partition="0"} 0
+
+ # HELP cortex_ingest_storage_reader_partition_start_offset_requests_total Total number of requests issued to get the partition start offset.
+ # TYPE cortex_ingest_storage_reader_partition_start_offset_requests_total counter
+ cortex_ingest_storage_reader_partition_start_offset_requests_total{partition="0"} 4
+ `), "cortex_ingest_storage_reader_partition_start_offset_requests_total",
+ "cortex_ingest_storage_reader_partition_start_offset_failures_total"))
+ })
+
+ t.Run("should honor context deadline and not fail other in-flight requests issued while the canceled one was still running", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ cluster, clusterAddr = testkafka.CreateCluster(t, numPartitions, topicName)
+ kafkaCfg = createTestKafkaConfig(clusterAddr, topicName)
+ client = createTestKafkaClient(t, kafkaCfg)
+ reg = prometheus.NewPedanticRegistry()
+ reader = newPartitionOffsetReader(client, topicName, partitionID, pollInterval, reg, logger)
+
+ firstRequest = atomic.NewBool(true)
+ firstRequestReceived = make(chan struct{})
+ firstRequestTimeout = time.Second
+ )
+
+ // Write 2 records.
+ produceRecord(ctx, t, client, topicName, partitionID, []byte("record 1"))
+ produceRecord(ctx, t, client, topicName, partitionID, []byte("record 2"))
+ t.Log("produced 2 records")
+
+ // Delete the 1st record.
+ adminClient := kadm.NewClient(client)
+ advancePartitionStartTo := kadm.Offsets{}
+ advancePartitionStartTo.Add(kadm.Offset{Topic: topicName, Partition: partitionID, At: 1})
+ _, err := adminClient.DeleteRecords(ctx, advancePartitionStartTo)
+ require.NoError(t, err)
+ t.Log("advanced partition start offset to 1")
+
+ expectedStartOffset := int64(1)
+
+ // Slow down the 1st ListOffsets request.
+ cluster.ControlKey(int16(kmsg.ListOffsets), func(kmsg.Request) (kmsg.Response, error, bool) {
+ if firstRequest.CompareAndSwap(true, false) {
+ close(firstRequestReceived)
+ time.Sleep(2 * firstRequestTimeout)
+ }
+ return nil, nil, false
+ })
+
+ wg := sync.WaitGroup{}
+
+ // Run the 1st FetchPartitionStartOffset() with a timeout which is expected to expire
+ // before the request can succeed.
+ runAsync(&wg, func() {
+ ctxWithTimeout, cancel := context.WithTimeout(ctx, firstRequestTimeout)
+ defer cancel()
+
+ _, err := reader.FetchPartitionStartOffset(ctxWithTimeout)
+ require.ErrorIs(t, err, context.DeadlineExceeded)
+ })
+
+ // Run a 2nd FetchPartitionStartOffset() once the 1st request is received. This request
+ // is expected to succeed.
+ runAsyncAfter(&wg, firstRequestReceived, func() {
+ offset, err := reader.FetchPartitionStartOffset(ctx)
+ require.NoError(t, err)
+ assert.Equal(t, expectedStartOffset, offset)
+ })
+
+ wg.Wait()
+ })
+
+ t.Run("should honor the configured retry timeout", func(t *testing.T) {
+ t.Parallel()
+
+ cluster, clusterAddr := testkafka.CreateCluster(t, numPartitions, topicName)
+
+ // Configure a short retry timeout.
+ kafkaCfg := createTestKafkaConfig(clusterAddr, topicName)
+ kafkaCfg.LastProducedOffsetRetryTimeout = time.Second
+
+ client := createTestKafkaClient(t, kafkaCfg)
+ reg := prometheus.NewPedanticRegistry()
+ reader := newPartitionOffsetReader(client, topicName, partitionID, pollInterval, reg, logger)
+
+ // Make the ListOffsets request fail.
+ actualTries := atomic.NewInt64(0)
+ cluster.ControlKey(int16(kmsg.ListOffsets), func(kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+ actualTries.Inc()
+ return nil, errors.New("mocked error"), true
+ })
+
+ startTime := time.Now()
+ _, err := reader.FetchPartitionStartOffset(ctx)
+ elapsedTime := time.Since(startTime)
+
+ require.Error(t, err)
+
+ // Ensure the retry timeout has been honored.
+ toleranceSeconds := 0.5
+ assert.InDelta(t, kafkaCfg.LastProducedOffsetRetryTimeout.Seconds(), elapsedTime.Seconds(), toleranceSeconds)
+
+ // Ensure the request was retried.
+ assert.Greater(t, actualTries.Load(), int64(1))
+ })
+}
+
+func TestPartitionOffsetReader_WaitNextFetchLastProducedOffset(t *testing.T) {
const (
numPartitions = 1
topicName = "test"
@@ -256,18 +420,18 @@ func TestPartitionOffsetReader_FetchLastProducedOffset(t *testing.T) {
wg := sync.WaitGroup{}
- // The 1st FetchLastProducedOffset() is called before the service start so it's expected
+ // The 1st WaitNextFetchLastProducedOffset() is called before the service starts, so it's expected
// to wait the result of the 1st request.
runAsync(&wg, func() {
- actual, err := reader.FetchLastProducedOffset(ctx)
+ actual, err := reader.WaitNextFetchLastProducedOffset(ctx)
require.NoError(t, err)
assert.Equal(t, int64(1), actual)
})
- // The 2nd FetchLastProducedOffset() is called while the 1st request is running, so it's expected
+ // The 2nd WaitNextFetchLastProducedOffset() is called while the 1st request is running, so it's expected
// to wait the result of the 2nd request.
runAsyncAfter(&wg, firstRequestReceived, func() {
- actual, err := reader.FetchLastProducedOffset(ctx)
+ actual, err := reader.WaitNextFetchLastProducedOffset(ctx)
require.NoError(t, err)
assert.Equal(t, int64(2), actual)
})
@@ -294,7 +458,7 @@ func TestPartitionOffsetReader_FetchLastProducedOffset(t *testing.T) {
canceledCtx, cancel := context.WithCancel(ctx)
cancel()
- _, err := reader.FetchLastProducedOffset(canceledCtx)
+ _, err := reader.WaitNextFetchLastProducedOffset(canceledCtx)
assert.ErrorIs(t, err, context.Canceled)
})
}
diff --git a/pkg/storage/ingest/partition_offset_watcher.go b/pkg/storage/ingest/partition_offset_watcher.go
index 9dddfbf3f44..77421b93e41 100644
--- a/pkg/storage/ingest/partition_offset_watcher.go
+++ b/pkg/storage/ingest/partition_offset_watcher.go
@@ -163,6 +163,14 @@ func (w *partitionOffsetWatcher) Wait(ctx context.Context, waitForOffset int64)
}
}
+// LastConsumedOffset returns the last consumed offset.
+func (w *partitionOffsetWatcher) LastConsumedOffset() int64 {
+ w.mx.Lock()
+ defer w.mx.Unlock()
+
+ return w.lastConsumedOffset
+}
+
// waitingGoroutinesCount returns the number of active watch groups (an active group has at least
// 1 goroutine waiting). This function is useful for testing.
func (w *partitionOffsetWatcher) watchGroupsCount() int {
diff --git a/pkg/storage/ingest/pusher.go b/pkg/storage/ingest/pusher.go
index dc5fac6d957..9fac27cdf08 100644
--- a/pkg/storage/ingest/pusher.go
+++ b/pkg/storage/ingest/pusher.go
@@ -47,12 +47,13 @@ func newPusherConsumer(p Pusher, reg prometheus.Registerer, l log.Logger) *pushe
return &pusherConsumer{
p: p,
l: l,
- processingTimeSeconds: promauto.With(reg).NewSummary(prometheus.SummaryOpts{
- Name: "cortex_ingest_storage_reader_processing_time_seconds",
- Help: "Time taken to process a single record (write request).",
- Objectives: latencySummaryObjectives,
- MaxAge: time.Minute,
- AgeBuckets: 10,
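+ // Native histogram configuration; classic buckets are also exposed as a fallback for scrapers without native histogram support.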
+ processingTimeSeconds: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
+ Name: "cortex_ingest_storage_reader_processing_time_seconds",
+ Help: "Time taken to process a single record (write request).",
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ Buckets: prometheus.DefBuckets,
}),
clientErrRequests: errRequestsCounter.WithLabelValues("client"),
serverErrRequests: errRequestsCounter.WithLabelValues("server"),
@@ -92,13 +93,23 @@ func (c pusherConsumer) pushRequests(ctx context.Context, reqC <-chan parsedReco
c.processingTimeSeconds.Observe(time.Since(processingStart).Seconds())
c.totalRequests.Inc()
+
if err != nil {
if !mimirpb.IsClientError(err) {
c.serverErrRequests.Inc()
return fmt.Errorf("consuming record at index %d for tenant %s: %w", recordIdx, wr.tenantID, err)
}
c.clientErrRequests.Inc()
- level.Warn(c.l).Log("msg", "detected a client error while ingesting write request (the request may have been partially ingested)", "err", err, "user", wr.tenantID)
+
+ // The error could be sampled or marked to be skipped in logs, so we check whether it should be
+ // logged before doing it.
+ if keep, reason := shouldLog(ctx, err); keep {
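+ // If the sampler returns a reason (e.g. "sampled 1/100"), annotate the logged error with it.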
+ if reason != "" {
+ err = fmt.Errorf("%w (%s)", err, reason)
+ }
+
+ level.Warn(c.l).Log("msg", "detected a client error while ingesting write request (the request may have been partially ingested)", "err", err, "user", wr.tenantID)
+ }
}
}
return nil
diff --git a/pkg/storage/ingest/pusher_test.go b/pkg/storage/ingest/pusher_test.go
index 77f894f9206..3bc64d5f235 100644
--- a/pkg/storage/ingest/pusher_test.go
+++ b/pkg/storage/ingest/pusher_test.go
@@ -4,17 +4,23 @@ package ingest
import (
"context"
+ "fmt"
+ "strings"
"testing"
"github.com/go-kit/log"
"github.com/gogo/status"
+ "github.com/grafana/dskit/concurrency"
+ "github.com/grafana/dskit/middleware"
"github.com/grafana/dskit/tenant"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"github.com/grafana/mimir/pkg/mimirpb"
+ util_log "github.com/grafana/mimir/pkg/util/log"
)
type pusherFunc func(context.Context, *mimirpb.WriteRequest) error
@@ -187,6 +193,88 @@ func TestPusherConsumer(t *testing.T) {
}
}
+func TestPusherConsumer_consume_ShouldLogErrorsHonoringOptionalLogging(t *testing.T) {
+ // Create a request that will be used in this test. The content doesn't matter,
+ // since we only test errors.
+ req := &mimirpb.WriteRequest{Timeseries: []mimirpb.PreallocTimeseries{mockPreallocTimeseries("series_1")}}
+ reqBytes, err := req.Marshal()
+ require.NoError(t, err)
+ reqRecord := record{tenantID: "user-1", content: reqBytes}
+
+ // Utility function used to set up the test.
+ setupTest := func(pusherErr error) (*pusherConsumer, *concurrency.SyncBuffer, *prometheus.Registry) {
+ pusher := pusherFunc(func(context.Context, *mimirpb.WriteRequest) error {
+ return pusherErr
+ })
+
+ reg := prometheus.NewPedanticRegistry()
+ logs := &concurrency.SyncBuffer{}
+ consumer := newPusherConsumer(pusher, reg, log.NewLogfmtLogger(logs))
+
+ return consumer, logs, reg
+ }
+
+ t.Run("should log a client error if does not implement optional logging interface", func(t *testing.T) {
+ pusherErr := ingesterError(mimirpb.BAD_DATA, codes.InvalidArgument, "mocked error")
+ consumer, logs, reg := setupTest(pusherErr)
+
+ // Should return no error on client errors.
+ require.NoError(t, consumer.consume(context.Background(), []record{reqRecord}))
+
+ assert.Contains(t, logs.String(), pusherErr.Error())
+ assert.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_records_failed_total Number of records (write requests) which caused errors while processing. Client errors are errors such as tenant limits and samples out of bounds. Server errors indicate internal recoverable errors.
+ # TYPE cortex_ingest_storage_reader_records_failed_total counter
+ cortex_ingest_storage_reader_records_failed_total{cause="client"} 1
+ cortex_ingest_storage_reader_records_failed_total{cause="server"} 0
+ `), "cortex_ingest_storage_reader_records_failed_total"))
+ })
+
+ t.Run("should log a client error if does implement optional logging interface and ShouldLog() returns true", func(t *testing.T) {
+ pusherErrSampler := util_log.NewSampler(100)
+ pusherErr := pusherErrSampler.WrapError(ingesterError(mimirpb.BAD_DATA, codes.InvalidArgument, "mocked error"))
+
+ // Pre-requisite: the mocked error should implement the optional logging interface.
+ var optionalLoggingErr middleware.OptionalLogging
+ require.ErrorAs(t, pusherErr, &optionalLoggingErr)
+
+ consumer, logs, reg := setupTest(pusherErr)
+
+ // Should return no error on client errors.
+ require.NoError(t, consumer.consume(context.Background(), []record{reqRecord}))
+
+ assert.Contains(t, logs.String(), fmt.Sprintf("%s (sampled 1/100)", pusherErr.Error()))
+ assert.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_records_failed_total Number of records (write requests) which caused errors while processing. Client errors are errors such as tenant limits and samples out of bounds. Server errors indicate internal recoverable errors.
+ # TYPE cortex_ingest_storage_reader_records_failed_total counter
+ cortex_ingest_storage_reader_records_failed_total{cause="client"} 1
+ cortex_ingest_storage_reader_records_failed_total{cause="server"} 0
+ `), "cortex_ingest_storage_reader_records_failed_total"))
+ })
+
+ t.Run("should not log a client error if does implement optional logging interface and ShouldLog() returns false", func(t *testing.T) {
+ pusherErr := middleware.DoNotLogError{Err: ingesterError(mimirpb.BAD_DATA, codes.InvalidArgument, "mocked error")}
+
+ // Pre-requisite: the mocked error should implement the optional logging interface.
+ var optionalLoggingErr middleware.OptionalLogging
+ require.ErrorAs(t, pusherErr, &optionalLoggingErr)
+
+ consumer, logs, reg := setupTest(pusherErr)
+
+ // Should return no error on client errors.
+ require.NoError(t, consumer.consume(context.Background(), []record{reqRecord}))
+
+ assert.Empty(t, logs.String())
+ assert.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_records_failed_total Number of records (write requests) which caused errors while processing. Client errors are errors such as tenant limits and samples out of bounds. Server errors indicate internal recoverable errors.
+ # TYPE cortex_ingest_storage_reader_records_failed_total counter
+ cortex_ingest_storage_reader_records_failed_total{cause="client"} 1
+ cortex_ingest_storage_reader_records_failed_total{cause="server"} 0
+ `), "cortex_ingest_storage_reader_records_failed_total"))
+ })
+
+}
+
// ingesterError mimics how the ingester construct errors
func ingesterError(cause mimirpb.ErrorCause, statusCode codes.Code, message string) error {
errorDetails := &mimirpb.ErrorDetails{Cause: cause}
diff --git a/pkg/storage/ingest/reader.go b/pkg/storage/ingest/reader.go
index 162169abd4d..6219aeae6f3 100644
--- a/pkg/storage/ingest/reader.go
+++ b/pkg/storage/ingest/reader.go
@@ -24,6 +24,14 @@ import (
"go.uber.org/atomic"
)
+const (
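+ // These values match the special offsets used by kgo.NewOffset().AtStart() and AtEnd() (see TestKafkaStartOffset and TestKafkaEndOffset).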
+ // kafkaStartOffset is a special offset value that means the beginning of the partition.
+ kafkaStartOffset = int64(-2)
+
+ // kafkaEndOffset is a special offset value that means the end of the partition.
+ kafkaEndOffset = int64(-1)
+)
+
type record struct {
tenantID string
content []byte
@@ -81,19 +89,58 @@ func newPartitionReader(kafkaCfg KafkaConfig, partitionID int32, consumerGroup s
return r, nil
}
-func (r *PartitionReader) start(ctx context.Context) error {
- startFromOffset, err := r.fetchLastCommittedOffsetWithRetries(ctx)
- if err != nil {
- return err
+func (r *PartitionReader) start(ctx context.Context) (returnErr error) {
+ // Stop dependencies if the start() fails.
+ defer func() {
+ if returnErr != nil {
+ _ = r.stopDependencies()
+ }
+ }()
+
+ var (
+ lastConsumedOffset int64
+ startOffset int64
+ err error
+ )
+
+ // Find the offset from which we should start consuming.
+ switch r.kafkaCfg.ConsumeFromPositionAtStartup {
+ case consumeFromStart:
+ lastConsumedOffset = -1
+ startOffset = kafkaStartOffset
+ level.Info(r.logger).Log("msg", "starting consumption from partition start", "start_offset", startOffset, "consumer_group", r.consumerGroup)
+
+ case consumeFromEnd:
+ lastConsumedOffset = -1
+ startOffset = kafkaEndOffset
+ level.Warn(r.logger).Log("msg", "starting consumption from partition end (may cause data loss)", "start_offset", startOffset, "consumer_group", r.consumerGroup)
+
+ default:
+ var exists bool
+ lastConsumedOffset, exists, err = r.fetchLastCommittedOffsetWithRetries(ctx)
+
+ if err != nil {
+ return err
+ } else if exists {
+ startOffset = lastConsumedOffset + 1 // We'll have to start consuming from the next offset (included).
+ level.Info(r.logger).Log("msg", "starting consumption from last consumed offset", "last_consumed_offset", lastConsumedOffset, "start_offset", startOffset, "consumer_group", r.consumerGroup)
+ } else {
+ lastConsumedOffset = -1
+ startOffset = kafkaStartOffset
+ level.Info(r.logger).Log("msg", "starting consumption from partition start because no committed offset has been found", "start_offset", startOffset, "consumer_group", r.consumerGroup)
+ }
+ }
+
+ // Initialise the last consumed offset only if we've got an actual offset from the consumer group.
+ if lastConsumedOffset >= 0 {
+ r.consumedOffsetWatcher.Notify(lastConsumedOffset)
}
- r.consumedOffsetWatcher.Notify(startFromOffset - 1)
- level.Info(r.logger).Log("msg", "resuming consumption from offset", "offset", startFromOffset)
- r.client, err = r.newKafkaReader(kgo.NewOffset().At(startFromOffset))
+ r.client, err = r.newKafkaReader(kgo.NewOffset().At(startOffset))
if err != nil {
return errors.Wrap(err, "creating kafka reader client")
}
- r.committer = newConsumerCommitter(r.kafkaCfg, kadm.NewClient(r.client), r.partitionID, r.consumerGroup, r.commitInterval, r.logger)
+ r.committer = newPartitionCommitter(r.kafkaCfg, kadm.NewClient(r.client), r.partitionID, r.consumerGroup, r.commitInterval, r.logger, r.reg)
r.offsetReader = newPartitionOffsetReader(r.client, r.kafkaCfg.Topic, r.partitionID, r.kafkaCfg.LastProducedOffsetPollInterval, r.reg, r.logger)
@@ -105,38 +152,129 @@ func (r *PartitionReader) start(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "starting service manager")
}
+
+ // Enforce the max consumer lag (if enabled).
+ if maxLag := r.kafkaCfg.MaxConsumerLagAtStartup; maxLag > 0 {
+ if startOffset != kafkaEndOffset {
+ if err := r.processNextFetchesUntilMaxLagHonored(ctx, maxLag); err != nil {
+ return err
+ }
+ } else {
+ level.Info(r.logger).Log("msg", "partition reader is skipping to consume partition until max consumer lag is honored because it's going to consume the partition from the end")
+ }
+ }
+
return nil
}
func (r *PartitionReader) stop(error) error {
level.Info(r.logger).Log("msg", "stopping partition reader")
- err := services.StopManagerAndAwaitStopped(context.Background(), r.dependencies)
- if err != nil {
- return errors.Wrap(err, "stopping service manager")
+ return r.stopDependencies()
+}
+
+func (r *PartitionReader) stopDependencies() error {
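+ // The dependencies and the Kafka client may be nil if start() failed before initialising them, so check before stopping.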
+ if r.dependencies != nil {
+ if err := services.StopManagerAndAwaitStopped(context.Background(), r.dependencies); err != nil {
+ return errors.Wrap(err, "stopping service manager")
+ }
}
- r.client.Close()
+
+ if r.client != nil {
+ r.client.Close()
+ }
+
return nil
}
func (r *PartitionReader) run(ctx context.Context) error {
for ctx.Err() == nil {
- fetches := r.client.PollFetches(ctx)
- r.recordFetchesMetrics(fetches)
- r.logFetchErrs(fetches)
- fetches = filterOutErrFetches(fetches)
-
- // TODO consumeFetches() may get interrupted in the middle because of ctx canceled due to PartitionReader stopped.
- // We should improve it, but we shouldn't just pass a context.Background() because if consumption is stuck
- // then PartitionReader will never stop.
- r.consumeFetches(ctx, fetches)
- r.enqueueCommit(fetches)
- r.notifyLastConsumedOffset(fetches)
+ r.processNextFetches(ctx, r.metrics.receiveDelayWhenRunning)
}
return nil
}
+func (r *PartitionReader) processNextFetches(ctx context.Context, delayObserver prometheus.Observer) {
+ fetches := r.client.PollFetches(ctx)
+ r.recordFetchesMetrics(fetches, delayObserver)
+ r.logFetchErrors(fetches)
+ fetches = filterOutErrFetches(fetches)
+
+ // TODO consumeFetches() may get interrupted in the middle because of ctx canceled due to PartitionReader stopped.
+ // We should improve it, but we shouldn't just pass a context.Background() because if consumption is stuck
+ // then PartitionReader will never stop.
+ r.consumeFetches(ctx, fetches)
+ r.enqueueCommit(fetches)
+ r.notifyLastConsumedOffset(fetches)
+}
+
+func (r *PartitionReader) processNextFetchesUntilMaxLagHonored(ctx context.Context, maxLag time.Duration) error {
+ level.Info(r.logger).Log("msg", "partition reader is starting to consume partition until max consumer lag is honored", "max_lag", maxLag)
+
+ boff := backoff.New(ctx, backoff.Config{
+ MinBackoff: 250 * time.Millisecond,
+ MaxBackoff: 2 * time.Second,
+ MaxRetries: 0, // retry forever
+ })
+
+ for boff.Ongoing() {
+ // Send a direct request to the Kafka backend to fetch the partition start offset.
+ partitionStartOffset, err := r.offsetReader.FetchPartitionStartOffset(ctx)
+ if err != nil {
+ level.Warn(r.logger).Log("msg", "partition reader failed to fetch partition start offset", "err", err)
+ boff.Wait()
+ continue
+ }
+
+ // Send a direct request to the Kafka backend to fetch the last produced offset.
+ // We intentionally don't use WaitNextFetchLastProducedOffset() to not introduce further
+ // latency.
+ lastProducedOffset, err := r.offsetReader.FetchLastProducedOffset(ctx)
+ if err != nil {
+ level.Warn(r.logger).Log("msg", "partition reader failed to fetch last produced offset", "err", err)
+ boff.Wait()
+ continue
+ }
+
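+ // Remember when the last produced offset was fetched: the consumer lag is later measured against this instant.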
+ lastProducedOffsetFetchedAt := time.Now()
+
+ // Ensure there are some records to consume. For example, if the partition has been inactive for a long
+ // time and all its records have been deleted, the partition start offset may be > 0 but there are no
+ // records to actually consume.
+ if partitionStartOffset > lastProducedOffset {
+ level.Info(r.logger).Log("msg", "partition reader found no records to consume because partition is empty", "partition_start_offset", partitionStartOffset, "last_produced_offset", lastProducedOffset)
+ return nil
+ }
+
+ // This message is NOT expected to be logged at a very high rate.
+ level.Info(r.logger).Log("msg", "partition reader is consuming records to honor max consumer lag", "partition_start_offset", partitionStartOffset, "last_produced_offset", lastProducedOffset)
+
+ for boff.Ongoing() {
+ // Continue reading until we reach the desired offset.
+ lastConsumedOffset := r.consumedOffsetWatcher.LastConsumedOffset()
+ if lastProducedOffset <= lastConsumedOffset {
+ break
+ }
+
+ r.processNextFetches(ctx, r.metrics.receiveDelayWhenStarting)
+ }
+
+ if boff.Err() != nil {
+ return boff.Err()
+ }
+
+ // If it took less than the max desired lag to replay the partition
+ // then we can stop here, otherwise we'll have to redo it.
+ if currLag := time.Since(lastProducedOffsetFetchedAt); currLag <= maxLag {
+ level.Info(r.logger).Log("msg", "partition reader consumed partition and current lag is less than configured max consumer lag", "last_consumed_offset", r.consumedOffsetWatcher.LastConsumedOffset(), "current_lag", currLag, "max_lag", maxLag)
+ return nil
+ }
+ }
+
+ return boff.Err()
+}
+
func filterOutErrFetches(fetches kgo.Fetches) kgo.Fetches {
filtered := make(kgo.Fetches, 0, len(fetches))
for i, fetch := range fetches {
@@ -159,12 +297,16 @@ func isErrFetch(fetch kgo.Fetch) bool {
return false
}
-func (r *PartitionReader) logFetchErrs(fetches kgo.Fetches) {
+func (r *PartitionReader) logFetchErrors(fetches kgo.Fetches) {
mErr := multierror.New()
- fetches.EachError(func(s string, i int32, err error) {
+ fetches.EachError(func(topic string, partition int32, err error) {
+ if errors.Is(err, context.Canceled) {
+ return
+ }
+
// kgo advises to "restart" the kafka client if the returned error is a kerr.Error.
// Recreating the client would cause duplicate metrics registration, so we don't do it for now.
- mErr.Add(fmt.Errorf("topic %q, partition %d: %w", s, i, err))
+ mErr.Add(fmt.Errorf("topic %q, partition %d: %w", topic, partition, err))
})
if len(mErr) == 0 {
return
@@ -245,10 +387,12 @@ func (r *PartitionReader) notifyLastConsumedOffset(fetches kgo.Fetches) {
// Records are expected to be sorted by offsets, so we can simply look at the last one.
rec := partition.Records[len(partition.Records)-1]
r.consumedOffsetWatcher.Notify(rec.Offset)
+
+ r.metrics.lastConsumedOffset.Set(float64(rec.Offset))
})
}
-func (r *PartitionReader) recordFetchesMetrics(fetches kgo.Fetches) {
+func (r *PartitionReader) recordFetchesMetrics(fetches kgo.Fetches, delayObserver prometheus.Observer) {
var (
now = time.Now()
numRecords = 0
@@ -256,7 +400,7 @@ func (r *PartitionReader) recordFetchesMetrics(fetches kgo.Fetches) {
fetches.EachRecord(func(record *kgo.Record) {
numRecords++
- r.metrics.receiveDelay.Observe(now.Sub(record.Timestamp).Seconds())
+ delayObserver.Observe(now.Sub(record.Timestamp).Seconds())
})
r.metrics.fetchesTotal.Add(float64(len(fetches)))
@@ -289,7 +433,7 @@ func (r *PartitionReader) newKafkaReader(at kgo.Offset) (*kgo.Client, error) {
return client, nil
}
-func (r *PartitionReader) fetchLastCommittedOffsetWithRetries(ctx context.Context) (offset int64, err error) {
+func (r *PartitionReader) fetchLastCommittedOffsetWithRetries(ctx context.Context) (offset int64, exists bool, err error) {
var (
retry = backoff.New(ctx, backoff.Config{
MinBackoff: 100 * time.Millisecond,
@@ -299,9 +443,9 @@ func (r *PartitionReader) fetchLastCommittedOffsetWithRetries(ctx context.Contex
)
for retry.Ongoing() {
- offset, err = r.fetchLastCommittedOffset(ctx)
+ offset, exists, err = r.fetchLastCommittedOffset(ctx)
if err == nil {
- return offset, nil
+ return offset, exists, nil
}
level.Warn(r.logger).Log("msg", "failed to fetch last committed offset", "err", err)
@@ -313,32 +457,39 @@ func (r *PartitionReader) fetchLastCommittedOffsetWithRetries(ctx context.Contex
err = retry.Err()
}
- return offset, err
+ return 0, false, err
}
-func (r *PartitionReader) fetchLastCommittedOffset(ctx context.Context) (int64, error) {
- const endOffset = -1 // -1 is a special value for kafka that means "the last offset"
-
+// fetchLastCommittedOffset returns the last consumed offset which has been committed by the PartitionReader
+// to the consumer group.
+func (r *PartitionReader) fetchLastCommittedOffset(ctx context.Context) (offset int64, exists bool, _ error) {
// We use an ephemeral client to fetch the offset and then create a new client with this offset.
// The reason for this is that changing the offset of an existing client requires to have used this client for fetching at least once.
// We don't want to do noop fetches just to warm up the client, so we create a new client instead.
- cl, err := kgo.NewClient(kgo.SeedBrokers(r.kafkaCfg.Address))
+ cl, err := kgo.NewClient(commonKafkaClientOptions(r.kafkaCfg, r.metrics.kprom, r.logger)...)
if err != nil {
- return endOffset, errors.Wrap(err, "unable to create admin client")
+ return 0, false, errors.Wrap(err, "unable to create admin client")
}
adm := kadm.NewClient(cl)
defer adm.Close()
offsets, err := adm.FetchOffsets(ctx, r.consumerGroup)
- if errors.Is(err, kerr.UnknownTopicOrPartition) {
- // In case we are booting up for the first time ever against this topic.
- return endOffset, nil
+ if errors.Is(err, kerr.GroupIDNotFound) || errors.Is(err, kerr.UnknownTopicOrPartition) {
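+ // No offset committed yet: this is expected when booting up for the first time ever against this consumer group or topic.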
+ return 0, false, nil
}
if err != nil {
- return endOffset, errors.Wrap(err, "unable to fetch group offsets")
+ return 0, false, errors.Wrap(err, "unable to fetch group offsets")
+ }
+
+ offsetRes, exists := offsets.Lookup(r.kafkaCfg.Topic, r.partitionID)
+ if !exists {
+ return 0, false, nil
}
- offset, _ := offsets.Lookup(r.kafkaCfg.Topic, r.partitionID)
- return offset.At, nil
+ if offsetRes.Err != nil {
+ return 0, false, offsetRes.Err
+ }
+
+ return offsetRes.At, true, nil
}
// WaitReadConsistency waits until all data produced up until now has been consumed by the reader.
@@ -368,7 +519,7 @@ func (r *PartitionReader) WaitReadConsistency(ctx context.Context) (returnErr er
}
// Get the last produced offset.
- lastProducedOffset, err := r.offsetReader.FetchLastProducedOffset(ctx)
+ lastProducedOffset, err := r.offsetReader.WaitNextFetchLastProducedOffset(ctx)
if err != nil {
return err
}
@@ -389,9 +540,15 @@ type partitionCommitter struct {
admClient *kadm.Client
logger log.Logger
+
+ // Metrics.
+ commitRequestsTotal prometheus.Counter
+ commitFailuresTotal prometheus.Counter
+ commitRequestsLatency prometheus.Histogram
+ lastCommittedOffset prometheus.Gauge
}
-func newConsumerCommitter(kafkaCfg KafkaConfig, admClient *kadm.Client, partitionID int32, consumerGroup string, commitInterval time.Duration, logger log.Logger) *partitionCommitter {
+func newPartitionCommitter(kafkaCfg KafkaConfig, admClient *kadm.Client, partitionID int32, consumerGroup string, commitInterval time.Duration, logger log.Logger, reg prometheus.Registerer) *partitionCommitter {
c := &partitionCommitter{
logger: logger,
kafkaCfg: kafkaCfg,
@@ -400,8 +557,37 @@ func newConsumerCommitter(kafkaCfg KafkaConfig, admClient *kadm.Client, partitio
toCommit: atomic.NewInt64(-1),
admClient: admClient,
commitInterval: commitInterval,
+
+ commitRequestsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Name: "cortex_ingest_storage_reader_offset_commit_requests_total",
+ Help: "Total number of requests issued to commit the last consumed offset (includes both successful and failed requests).",
+ ConstLabels: prometheus.Labels{"partition": strconv.Itoa(int(partitionID))},
+ }),
+ commitFailuresTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Name: "cortex_ingest_storage_reader_offset_commit_failures_total",
+ Help: "Total number of failed requests to commit the last consumed offset.",
+ ConstLabels: prometheus.Labels{"partition": strconv.Itoa(int(partitionID))},
+ }),
+ commitRequestsLatency: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
+ Name: "cortex_ingest_storage_reader_offset_commit_request_duration_seconds",
+ Help: "The duration of requests to commit the last consumed offset.",
+ ConstLabels: prometheus.Labels{"partition": strconv.Itoa(int(partitionID))},
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: time.Hour,
+ Buckets: prometheus.DefBuckets,
+ }),
+ lastCommittedOffset: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
+ Name: "cortex_ingest_storage_reader_last_committed_offset",
+ Help: "The last consumed offset successfully committed by the partition reader. Set to -1 if not offset has been committed yet.",
+ ConstLabels: prometheus.Labels{"partition": strconv.Itoa(int(partitionID))},
+ }),
}
c.Service = services.NewBasicService(nil, c.run, c.stop)
+
+ // Initialise the last committed offset metric to -1 to signal no offset has been committed yet (0 is a valid offset).
+ c.lastCommittedOffset.Set(-1)
+
return c
}
@@ -423,26 +609,43 @@ func (r *partitionCommitter) run(ctx context.Context) error {
if currOffset == previousOffset {
continue
}
- previousOffset = currOffset
- r.commit(ctx, currOffset)
+
+ if err := r.commit(ctx, currOffset); err == nil {
+ previousOffset = currOffset
+ }
}
}
}
-func (r *partitionCommitter) commit(ctx context.Context, offset int64) {
+func (r *partitionCommitter) commit(ctx context.Context, offset int64) (returnErr error) {
+ startTime := time.Now()
+ r.commitRequestsTotal.Inc()
+
+ defer func() {
+ r.commitRequestsLatency.Observe(time.Since(startTime).Seconds())
+
+ if returnErr != nil {
+ level.Error(r.logger).Log("msg", "failed to commit last consumed offset to Kafka", "err", returnErr, "offset", offset)
+ r.commitFailuresTotal.Inc()
+ }
+ }()
+
+ // Commit the last consumed offset.
toCommit := kadm.Offsets{}
- // Commit the offset after the last record.
- // The reason for this is that we resume consumption at this offset.
- // Leader epoch is -1 because we don't know it. This lets Kafka figure it out.
- toCommit.AddOffset(r.kafkaCfg.Topic, r.partitionID, offset+1, -1)
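+ // Leader epoch is -1 because we don't know it. This lets Kafka figure it out.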
+ toCommit.AddOffset(r.kafkaCfg.Topic, r.partitionID, offset, -1)
committed, err := r.admClient.CommitOffsets(ctx, r.consumerGroup, toCommit)
- if err != nil || !committed.Ok() {
- level.Error(r.logger).Log("msg", "encountered error while committing offsets", "err", err, "commit_err", committed.Error(), "offset", offset)
- } else {
- committedOffset, _ := committed.Lookup(r.kafkaCfg.Topic, r.partitionID)
- level.Debug(r.logger).Log("msg", "committed offset", "offset", committedOffset.Offset.At)
+ if err != nil {
+ return err
+ } else if !committed.Ok() {
+ return committed.Error()
}
+
+ committedOffset, _ := committed.Lookup(r.kafkaCfg.Topic, r.partitionID)
+ level.Debug(r.logger).Log("msg", "last commit offset successfully committed to Kafka", "offset", committedOffset.At)
+ r.lastCommittedOffset.Set(float64(committedOffset.At))
+
+ return nil
}
func (r *partitionCommitter) stop(error) error {
@@ -450,61 +653,79 @@ func (r *partitionCommitter) stop(error) error {
if offset < 0 {
return nil
}
+
// Commit has internal timeouts, so this call shouldn't block for too long.
- r.commit(context.Background(), offset)
+ _ = r.commit(context.Background(), offset)
+
return nil
}
type readerMetrics struct {
- receiveDelay prometheus.Summary
+ receiveDelayWhenStarting prometheus.Observer
+ receiveDelayWhenRunning prometheus.Observer
recordsPerFetch prometheus.Histogram
fetchesErrors prometheus.Counter
fetchesTotal prometheus.Counter
strongConsistencyRequests prometheus.Counter
strongConsistencyFailures prometheus.Counter
- strongConsistencyLatency prometheus.Summary
+ strongConsistencyLatency prometheus.Histogram
+ lastConsumedOffset prometheus.Gauge
kprom *kprom.Metrics
}
func newReaderMetrics(partitionID int32, reg prometheus.Registerer) readerMetrics {
- factory := promauto.With(reg)
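+ // The "phase" label distinguishes records consumed while catching up at startup ("starting") from records consumed during normal operation ("running").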
+ receiveDelay := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
+ Name: "cortex_ingest_storage_reader_receive_delay_seconds",
+ Help: "Delay between producing a record and receiving it in the consumer.",
+ NativeHistogramZeroThreshold: math.Pow(2, -10), // Values below this will be considered to be 0. Equals to 0.0009765625, or about 1ms.
+ NativeHistogramBucketFactor: 1.2, // We use higher factor (scheme=2) to have wider spread of buckets.
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ Buckets: prometheus.ExponentialBuckets(0.125, 2, 18), // Buckets between 125ms and 9h.
+ }, []string{"phase"})
+
+ lastConsumedOffset := promauto.With(reg).NewGauge(prometheus.GaugeOpts{
+ Name: "cortex_ingest_storage_reader_last_consumed_offset",
+ Help: "The last offset successfully consumed by the partition reader. Set to -1 if not offset has been consumed yet.",
+ ConstLabels: prometheus.Labels{"partition": strconv.Itoa(int(partitionID))},
+ })
+
+ // Initialise the last consumed offset metric to -1 to signal no offset has been consumed yet (0 is a valid offset).
+ lastConsumedOffset.Set(-1)
return readerMetrics{
- receiveDelay: factory.NewSummary(prometheus.SummaryOpts{
- Name: "cortex_ingest_storage_reader_receive_delay_seconds",
- Help: "Delay between producing a record and receiving it in the consumer.",
- Objectives: latencySummaryObjectives,
- MaxAge: time.Minute,
- AgeBuckets: 10,
- }),
- recordsPerFetch: factory.NewHistogram(prometheus.HistogramOpts{
+ receiveDelayWhenStarting: receiveDelay.WithLabelValues("starting"),
+ receiveDelayWhenRunning: receiveDelay.WithLabelValues("running"),
+ recordsPerFetch: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "cortex_ingest_storage_reader_records_per_fetch",
Help: "The number of records received by the consumer in a single fetch operation.",
Buckets: prometheus.ExponentialBuckets(1, 2, 15),
}),
- fetchesErrors: factory.NewCounter(prometheus.CounterOpts{
+ fetchesErrors: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "cortex_ingest_storage_reader_fetch_errors_total",
Help: "The number of fetch errors encountered by the consumer.",
}),
- fetchesTotal: factory.NewCounter(prometheus.CounterOpts{
+ fetchesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "cortex_ingest_storage_reader_fetches_total",
Help: "Total number of Kafka fetches received by the consumer.",
}),
- strongConsistencyRequests: factory.NewCounter(prometheus.CounterOpts{
+ strongConsistencyRequests: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "cortex_ingest_storage_strong_consistency_requests_total",
Help: "Total number of requests for which strong consistency has been requested.",
}),
- strongConsistencyFailures: factory.NewCounter(prometheus.CounterOpts{
+ strongConsistencyFailures: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "cortex_ingest_storage_strong_consistency_failures_total",
Help: "Total number of failures while waiting for strong consistency to be enforced.",
}),
- strongConsistencyLatency: factory.NewSummary(prometheus.SummaryOpts{
- Name: "cortex_ingest_storage_strong_consistency_wait_duration_seconds",
- Help: "How long a request spent waiting for strong consistency to be guaranteed.",
- Objectives: latencySummaryObjectives,
- MaxAge: time.Minute,
- AgeBuckets: 10,
+ strongConsistencyLatency: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
+ Name: "cortex_ingest_storage_strong_consistency_wait_duration_seconds",
+ Help: "How long a request spent waiting for strong consistency to be guaranteed.",
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ Buckets: prometheus.DefBuckets,
}),
+ lastConsumedOffset: lastConsumedOffset,
kprom: kprom.NewMetrics("cortex_ingest_storage_reader",
kprom.Registerer(prometheus.WrapRegistererWith(prometheus.Labels{"partition": strconv.Itoa(int(partitionID))}, reg)),
// Do not export the client ID, because we use it to specify options to the backend.
diff --git a/pkg/storage/ingest/reader_test.go b/pkg/storage/ingest/reader_test.go
index e0b5435c21c..59206beb83e 100644
--- a/pkg/storage/ingest/reader_test.go
+++ b/pkg/storage/ingest/reader_test.go
@@ -6,22 +6,43 @@ import (
"context"
"errors"
"fmt"
+ "slices"
"strings"
+ "sync"
"testing"
"time"
"github.com/go-kit/log"
"github.com/grafana/dskit/services"
+ "github.com/grafana/dskit/test"
"github.com/prometheus/client_golang/prometheus"
promtest "github.com/prometheus/client_golang/prometheus/testutil"
+ "github.com/prometheus/prometheus/util/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/twmb/franz-go/pkg/kadm"
+ "github.com/twmb/franz-go/pkg/kerr"
"github.com/twmb/franz-go/pkg/kgo"
+ "github.com/twmb/franz-go/pkg/kmsg"
"go.uber.org/atomic"
"github.com/grafana/mimir/pkg/util/testkafka"
)
+func TestKafkaStartOffset(t *testing.T) {
+ t.Run("should match Kafka client start offset", func(t *testing.T) {
+ expected := kgo.NewOffset().AtStart().EpochOffset().Offset
+ assert.Equal(t, expected, kafkaStartOffset)
+ })
+}
+
+func TestKafkaEndOffset(t *testing.T) {
+ t.Run("should match Kafka client end offset", func(t *testing.T) {
+ expected := kgo.NewOffset().AtEnd().EpochOffset().Offset
+ assert.Equal(t, expected, kafkaEndOffset)
+ })
+}
+
func TestPartitionReader(t *testing.T) {
const (
topicName = "test"
@@ -36,7 +57,7 @@ func TestPartitionReader(t *testing.T) {
content := []byte("special content")
consumer := newTestConsumer(2)
- startReader(ctx, t, clusterAddr, topicName, partitionID, consumer)
+ createAndStartReader(ctx, t, clusterAddr, topicName, partitionID, consumer)
writeClient := newKafkaProduceClient(t, clusterAddr)
@@ -48,7 +69,38 @@ func TestPartitionReader(t *testing.T) {
assert.Equal(t, [][]byte{content, content}, records)
}
-func TestReader_ConsumerError(t *testing.T) {
+func TestPartitionReader_logFetchErrors(t *testing.T) {
+ const (
+ topicName = "test"
+ partitionID = 1
+ )
+
+ cfg := defaultReaderTestConfig(t, "", topicName, partitionID, nil)
+ reader, err := newPartitionReader(cfg.kafka, cfg.partitionID, "test-group", cfg.consumer, cfg.logger, cfg.registry)
+ require.NoError(t, err)
+
+ reader.logFetchErrors(kgo.Fetches{
+ kgo.Fetch{Topics: []kgo.FetchTopic{
+ {
+ Topic: topicName,
+ Partitions: []kgo.FetchPartition{
+ {Partition: partitionID, Err: nil},
+ {Partition: partitionID, Err: context.Canceled}, // not counted in metrics
+ {Partition: partitionID, Err: fmt.Errorf("wrapped: %w", context.Canceled)}, // not counted in metrics
+ {Partition: partitionID, Err: fmt.Errorf("real error")}, // counted
+ },
+ },
+ }},
+ })
+
+ assert.NoError(t, promtest.GatherAndCompare(cfg.registry, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_fetch_errors_total The number of fetch errors encountered by the consumer.
+ # TYPE cortex_ingest_storage_reader_fetch_errors_total counter
+ cortex_ingest_storage_reader_fetch_errors_total 1
+ `), "cortex_ingest_storage_reader_fetch_errors_total"))
+}
+
+func TestPartitionReader_ConsumerError(t *testing.T) {
const (
topicName = "test"
partitionID = 1
@@ -71,7 +123,7 @@ func TestReader_ConsumerError(t *testing.T) {
assert.Equal(t, "1", string(records[0].content))
return errors.New("consumer error")
})
- startReader(ctx, t, clusterAddr, topicName, partitionID, consumer)
+ createAndStartReader(ctx, t, clusterAddr, topicName, partitionID, consumer)
// Write to Kafka.
writeClient := newKafkaProduceClient(t, clusterAddr)
@@ -105,7 +157,7 @@ func TestPartitionReader_WaitReadConsistency(t *testing.T) {
_, clusterAddr := testkafka.CreateCluster(t, 1, topicName)
// Configure the reader to poll the "last produced offset" frequently.
- reader := startReader(ctx, t, clusterAddr, topicName, partitionID, consumer,
+ reader := createAndStartReader(ctx, t, clusterAddr, topicName, partitionID, consumer,
withLastProducedOffsetPollInterval(100*time.Millisecond),
withRegistry(reg))
@@ -122,7 +174,7 @@ func TestPartitionReader_WaitReadConsistency(t *testing.T) {
// We define a custom consume function which introduces a delay once the 2nd record
// has been consumed but before the function returns. From the PartitionReader perspective,
// the 2nd record consumption will be delayed.
- consumer := consumerFunc(func(ctx context.Context, records []record) error {
+ consumer := consumerFunc(func(_ context.Context, records []record) error {
for _, record := range records {
// Introduce a delay before returning from the consume function once
// the 2nd record has been consumed.
@@ -241,6 +293,937 @@ func TestPartitionReader_WaitReadConsistency(t *testing.T) {
})
}
+func TestPartitionReader_ConsumeAtStartup(t *testing.T) {
+ const (
+ topicName = "test"
+ partitionID = 1
+ )
+
+ ctx := context.Background()
+
+ t.Run("should immediately switch to Running state if partition is empty", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ _, clusterAddr = testkafka.CreateCluster(t, partitionID+1, topicName)
+ consumer = consumerFunc(func(context.Context, []record) error { return nil })
+ reg = prometheus.NewPedanticRegistry()
+ )
+
+ // Create and start the reader. We expect the reader to start even if partition is empty.
+ reader := createReader(t, clusterAddr, topicName, partitionID, consumer, withMaxConsumerLagAtStartup(time.Second), withRegistry(reg))
+ require.NoError(t, services.StartAndAwaitRunning(ctx, reader))
+ require.NoError(t, services.StopAndAwaitTerminated(ctx, reader))
+
+ // The last consumed offset should be -1, since nothing has been consumed yet.
+ assert.NoError(t, promtest.GatherAndCompare(reg, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_last_consumed_offset The last offset successfully consumed by the partition reader. Set to -1 if no offset has been consumed yet.
+ # TYPE cortex_ingest_storage_reader_last_consumed_offset gauge
+ cortex_ingest_storage_reader_last_consumed_offset{partition="1"} -1
+ `), "cortex_ingest_storage_reader_last_consumed_offset"))
+ })
+
+ t.Run("should immediately switch to Running state if configured max lag is 0", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ cluster, clusterAddr = testkafka.CreateCluster(t, partitionID+1, topicName)
+ consumer = consumerFunc(func(context.Context, []record) error { return nil })
+ reg = prometheus.NewPedanticRegistry()
+ )
+
+ // Mock Kafka to fail the Fetch request.
+ cluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+
+ return nil, errors.New("mocked error"), true
+ })
+
+ // Produce some records.
+ writeClient := newKafkaProduceClient(t, clusterAddr)
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-1"))
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-2"))
+ t.Log("produced 2 records")
+
+ // Create and start the reader. We expect the reader to start even if Fetch is failing.
+ reader := createReader(t, clusterAddr, topicName, partitionID, consumer, withMaxConsumerLagAtStartup(0), withRegistry(reg))
+ require.NoError(t, services.StartAndAwaitRunning(ctx, reader))
+ require.NoError(t, services.StopAndAwaitTerminated(ctx, reader))
+
+ // The last consumed offset should be -1, since nothing has been consumed yet (Fetch requests are failing).
+ assert.NoError(t, promtest.GatherAndCompare(reg, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_last_consumed_offset The last offset successfully consumed by the partition reader. Set to -1 if no offset has been consumed yet.
+ # TYPE cortex_ingest_storage_reader_last_consumed_offset gauge
+ cortex_ingest_storage_reader_last_consumed_offset{partition="1"} -1
+ `), "cortex_ingest_storage_reader_last_consumed_offset"))
+ })
+
+ t.Run("should consume partition from start if last committed offset is missing and wait until max lag is honored", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ cluster, clusterAddr = testkafka.CreateCluster(t, partitionID+1, topicName)
+ fetchRequestsCount = atomic.NewInt64(0)
+ fetchShouldFail = atomic.NewBool(true)
+ consumedRecordsCount = atomic.NewInt64(0)
+ )
+
+ consumer := consumerFunc(func(_ context.Context, records []record) error {
+ consumedRecordsCount.Add(int64(len(records)))
+ return nil
+ })
+
+ cluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+ fetchRequestsCount.Inc()
+
+ if fetchShouldFail.Load() {
+ return nil, errors.New("mocked error"), true
+ }
+
+ return nil, nil, false
+ })
+
+ // Produce some records.
+ writeClient := newKafkaProduceClient(t, clusterAddr)
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-1"))
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-2"))
+ t.Log("produced 2 records")
+
+ // Create and start the reader.
+ reg := prometheus.NewPedanticRegistry()
+ reader := createReader(t, clusterAddr, topicName, partitionID, consumer, withMaxConsumerLagAtStartup(time.Second), withRegistry(reg))
+ require.NoError(t, reader.StartAsync(ctx))
+ t.Cleanup(func() {
+ require.NoError(t, services.StopAndAwaitTerminated(ctx, reader))
+ })
+
+ // Wait until the Kafka cluster received a few Fetch requests.
+ test.Poll(t, 5*time.Second, true, func() interface{} {
+ return fetchRequestsCount.Load() > 2
+ })
+
+ // Since the mocked Kafka cluster is configured to fail any Fetch, we expect the reader hasn't
+ // caught up yet, and it's still in Starting state.
+ assert.Equal(t, services.Starting, reader.State())
+ assert.Equal(t, int64(0), consumedRecordsCount.Load())
+
+ // Unblock the Fetch requests. Now they will succeed.
+ fetchShouldFail.Store(false)
+
+ // We expect the reader to catch up, and then switch to Running state.
+ test.Poll(t, 5*time.Second, services.Running, func() interface{} {
+ return reader.State()
+ })
+
+ assert.Equal(t, int64(2), consumedRecordsCount.Load())
+
+ // We expect the last consumed offset to be tracked in a metric.
+ test.Poll(t, time.Second, nil, func() interface{} {
+ return promtest.GatherAndCompare(reg, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_last_consumed_offset The last offset successfully consumed by the partition reader. Set to -1 if no offset has been consumed yet.
+ # TYPE cortex_ingest_storage_reader_last_consumed_offset gauge
+ cortex_ingest_storage_reader_last_consumed_offset{partition="1"} 1
+ `), "cortex_ingest_storage_reader_last_consumed_offset")
+ })
+ })
+
+ t.Run("should consume partition from start if last committed offset is missing and wait until max lag is honored and retry if a failure occurs when fetching last produced offset", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ cluster, clusterAddr = testkafka.CreateCluster(t, partitionID+1, topicName)
+ listOffsetsRequestsCount = atomic.NewInt64(0)
+ listOffsetsShouldFail = atomic.NewBool(true)
+ consumedRecordsCount = atomic.NewInt64(0)
+ )
+
+ consumer := consumerFunc(func(_ context.Context, records []record) error {
+ consumedRecordsCount.Add(int64(len(records)))
+ return nil
+ })
+
+ cluster.ControlKey(int16(kmsg.ListOffsets), func(kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+ listOffsetsRequestsCount.Inc()
+
+ if listOffsetsShouldFail.Load() {
+ return nil, errors.New("mocked error"), true
+ }
+
+ return nil, nil, false
+ })
+
+ // Produce some records.
+ writeClient := newKafkaProduceClient(t, clusterAddr)
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-1"))
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-2"))
+ t.Log("produced 2 records")
+
+ // Create and start the reader.
+ reg := prometheus.NewPedanticRegistry()
+ reader := createReader(t, clusterAddr, topicName, partitionID, consumer, withMaxConsumerLagAtStartup(time.Second), withRegistry(reg))
+ require.NoError(t, reader.StartAsync(ctx))
+ t.Cleanup(func() {
+ require.NoError(t, services.StopAndAwaitTerminated(ctx, reader))
+ })
+
+ // Wait until the Kafka cluster received a few ListOffsets requests.
+ test.Poll(t, 5*time.Second, true, func() interface{} {
+ return listOffsetsRequestsCount.Load() > 2
+ })
+
+ // Since the mocked Kafka cluster is configured to fail any ListOffsets request, we expect the reader hasn't
+ // caught up yet, and it's still in Starting state.
+ assert.Equal(t, services.Starting, reader.State())
+ assert.Equal(t, int64(0), consumedRecordsCount.Load())
+
+ // Unblock the ListOffsets requests. Now they will succeed.
+ listOffsetsShouldFail.Store(false)
+
+ // We expect the reader to catch up, and then switch to Running state.
+ test.Poll(t, 5*time.Second, services.Running, func() interface{} {
+ return reader.State()
+ })
+
+ assert.Equal(t, int64(2), consumedRecordsCount.Load())
+
+ // We expect the last consumed offset to be tracked in a metric.
+ test.Poll(t, time.Second, nil, func() interface{} {
+ return promtest.GatherAndCompare(reg, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_last_consumed_offset The last offset successfully consumed by the partition reader. Set to -1 if no offset has been consumed yet.
+ # TYPE cortex_ingest_storage_reader_last_consumed_offset gauge
+ cortex_ingest_storage_reader_last_consumed_offset{partition="1"} 1
+ `), "cortex_ingest_storage_reader_last_consumed_offset")
+ })
+ })
+
+ t.Run("should consume partition from end if position=end, and skip honoring max lag", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ cluster, clusterAddr = testkafka.CreateCluster(t, partitionID+1, topicName)
+ reg = prometheus.NewPedanticRegistry()
+ fetchRequestsCount = atomic.NewInt64(0)
+ fetchShouldFail = atomic.NewBool(true)
+ consumedRecordsMx sync.Mutex
+ consumedRecords []string
+ )
+
+ consumer := consumerFunc(func(_ context.Context, records []record) error {
+ consumedRecordsMx.Lock()
+ defer consumedRecordsMx.Unlock()
+
+ for _, r := range records {
+ consumedRecords = append(consumedRecords, string(r.content))
+ }
+ return nil
+ })
+
+ cluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+
+ fetchRequestsCount.Inc()
+ if fetchShouldFail.Load() {
+ return nil, errors.New("mocked error"), true
+ }
+
+ return nil, nil, false
+ })
+
+ // Produce some records.
+ writeClient := newKafkaProduceClient(t, clusterAddr)
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-1"))
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-2"))
+ t.Log("produced 2 records before starting the reader")
+
+ // Create and start the reader.
+ reader := createReader(t, clusterAddr, topicName, partitionID, consumer, withConsumeFromPositionAtStartup(consumeFromEnd), withMaxConsumerLagAtStartup(time.Second), withRegistry(reg))
+ require.NoError(t, reader.StartAsync(ctx))
+ t.Cleanup(func() {
+ require.NoError(t, services.StopAndAwaitTerminated(ctx, reader))
+ })
+
+ // The reader service should start even if Fetch is failing because the max lag check is skipped.
+ test.Poll(t, time.Second, services.Running, func() interface{} {
+ return reader.State()
+ })
+
+ // Make Fetch working.
+ fetchShouldFail.Store(false)
+
+ // Wait until a Fetch request has been issued at least once, in order to avoid a race condition
+ // (we may otherwise produce the next record before the client has fetched the partition end position).
+ require.Eventually(t, func() bool {
+ return fetchRequestsCount.Load() > 0
+ }, 5*time.Second, 10*time.Millisecond)
+
+ // Produce one more record.
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-3"))
+ t.Log("produced 1 record after starting the reader")
+
+ // Since the reader has been configured with position=end we expect to consume only
+ // the record produced after reader has been started.
+ test.Poll(t, 5*time.Second, []string{"record-3"}, func() interface{} {
+ consumedRecordsMx.Lock()
+ defer consumedRecordsMx.Unlock()
+ return slices.Clone(consumedRecords)
+ })
+
+ // We expect the last consumed offset to be tracked in a metric.
+ test.Poll(t, time.Second, nil, func() interface{} {
+ return promtest.GatherAndCompare(reg, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_last_consumed_offset The last offset successfully consumed by the partition reader. Set to -1 if no offset has been consumed yet.
+ # TYPE cortex_ingest_storage_reader_last_consumed_offset gauge
+ cortex_ingest_storage_reader_last_consumed_offset{partition="1"} 2
+ `), "cortex_ingest_storage_reader_last_consumed_offset")
+ })
+ })
+
+ t.Run("should consume partition from start if position=start, and wait until max lag is honored", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ cluster, clusterAddr = testkafka.CreateCluster(t, partitionID+1, topicName)
+ fetchRequestsCount = atomic.NewInt64(0)
+ fetchShouldFail = atomic.NewBool(false)
+ consumedRecordsMx sync.Mutex
+ consumedRecords []string
+ )
+
+ consumer := consumerFunc(func(_ context.Context, records []record) error {
+ consumedRecordsMx.Lock()
+ defer consumedRecordsMx.Unlock()
+
+ for _, r := range records {
+ consumedRecords = append(consumedRecords, string(r.content))
+ }
+ return nil
+ })
+
+ cluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+ fetchRequestsCount.Inc()
+
+ if fetchShouldFail.Load() {
+ return nil, errors.New("mocked error"), true
+ }
+
+ return nil, nil, false
+ })
+
+ // Produce some records.
+ writeClient := newKafkaProduceClient(t, clusterAddr)
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-1"))
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-2"))
+ t.Log("produced 2 records")
+
+ // Run the test twice with the same Kafka cluster to show that the second time it consumes all records again.
+ for run := 1; run <= 2; run++ {
+ t.Run(fmt.Sprintf("Run %d", run), func(t *testing.T) {
+ // Reset the test.
+ fetchShouldFail.Store(true)
+ fetchRequestsCount.Store(0)
+ consumedRecordsMx.Lock()
+ consumedRecords = nil
+ consumedRecordsMx.Unlock()
+
+ // Create and start the reader.
+ reg := prometheus.NewPedanticRegistry()
+ reader := createReader(t, clusterAddr, topicName, partitionID, consumer, withConsumeFromPositionAtStartup(consumeFromStart), withMaxConsumerLagAtStartup(time.Second), withRegistry(reg))
+ require.NoError(t, reader.StartAsync(ctx))
+ t.Cleanup(func() {
+ require.NoError(t, services.StopAndAwaitTerminated(ctx, reader))
+ })
+
+ // Wait until the Kafka cluster received a few Fetch requests.
+ test.Poll(t, 5*time.Second, true, func() interface{} {
+ return fetchRequestsCount.Load() > 2
+ })
+
+ // Since the mocked Kafka cluster is configured to fail any Fetch, we expect the reader hasn't
+ // caught up yet, and it's still in Starting state.
+ assert.Equal(t, services.Starting, reader.State())
+
+ // Unblock the Fetch requests. Now they will succeed.
+ fetchShouldFail.Store(false)
+
+ // We expect the reader to catch up, and then switch to Running state.
+ test.Poll(t, 5*time.Second, services.Running, func() interface{} {
+ return reader.State()
+ })
+
+ // We expect the reader to have consumed the partition from start.
+ test.Poll(t, time.Second, []string{"record-1", "record-2"}, func() interface{} {
+ consumedRecordsMx.Lock()
+ defer consumedRecordsMx.Unlock()
+ return slices.Clone(consumedRecords)
+ })
+
+ // We expect the last consumed offset to be tracked in a metric.
+ test.Poll(t, time.Second, nil, func() interface{} {
+ return promtest.GatherAndCompare(reg, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_last_consumed_offset The last offset successfully consumed by the partition reader. Set to -1 if no offset has been consumed yet.
+ # TYPE cortex_ingest_storage_reader_last_consumed_offset gauge
+ cortex_ingest_storage_reader_last_consumed_offset{partition="1"} 1
+ `), "cortex_ingest_storage_reader_last_consumed_offset")
+ })
+ })
+ }
+ })
+
+ t.Run("should consume partition from last committed offset if position=last-offset, and wait until max lag is honored", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ cluster, clusterAddr = testkafka.CreateCluster(t, partitionID+1, topicName)
+ fetchRequestsCount = atomic.NewInt64(0)
+ fetchShouldFail = atomic.NewBool(false)
+ consumedRecordsMx sync.Mutex
+ consumedRecords []string
+ )
+
+ consumer := consumerFunc(func(_ context.Context, records []record) error {
+ consumedRecordsMx.Lock()
+ defer consumedRecordsMx.Unlock()
+
+ for _, r := range records {
+ consumedRecords = append(consumedRecords, string(r.content))
+ }
+ return nil
+ })
+
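+ // Intercept Fetch requests: count them and make them fail while fetchShouldFail is set, so the reader can't catch up.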
+ cluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+ fetchRequestsCount.Inc()
+
+ if fetchShouldFail.Load() {
+ return nil, errors.New("mocked error"), true
+ }
+
+ return nil, nil, false
+ })
+
+ // Run the test twice with the same Kafka cluster to show that the second time it consumes only new records.
+ for run := 1; run <= 2; run++ {
+ t.Run(fmt.Sprintf("Run %d", run), func(t *testing.T) {
+ // Reset the test.
+ fetchShouldFail.Store(true)
+ fetchRequestsCount.Store(0)
+ consumedRecordsMx.Lock()
+ consumedRecords = nil
+ consumedRecordsMx.Unlock()
+
+ // Produce a record before each test run.
+ writeClient := newKafkaProduceClient(t, clusterAddr)
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte(fmt.Sprintf("record-%d", run)))
+ t.Log("produced 1 record")
+
+ // Create and start the reader.
+ reg := prometheus.NewPedanticRegistry()
+ reader := createReader(t, clusterAddr, topicName, partitionID, consumer, withConsumeFromPositionAtStartup(consumeFromLastOffset), withMaxConsumerLagAtStartup(time.Second), withRegistry(reg))
+ require.NoError(t, reader.StartAsync(ctx))
+ t.Cleanup(func() {
+ require.NoError(t, services.StopAndAwaitTerminated(ctx, reader))
+ })
+
+ // Wait until the Kafka cluster has received a few Fetch requests.
+ test.Poll(t, 5*time.Second, true, func() interface{} {
+ return fetchRequestsCount.Load() > 2
+ })
+
+ // Since the mocked Kafka cluster is configured to fail any Fetch request, we expect the reader
+ // hasn't caught up yet and is still in the Starting state.
+ assert.Equal(t, services.Starting, reader.State())
+
+ // Unblock the Fetch requests. Now they will succeed.
+ fetchShouldFail.Store(false)
+
+ // We expect the reader to catch up, and then switch to Running state.
+ test.Poll(t, 5*time.Second, services.Running, func() interface{} {
+ return reader.State()
+ })
+
+ // We expect the reader to have consumed the partition from the last offset.
+ test.Poll(t, time.Second, []string{fmt.Sprintf("record-%d", run)}, func() interface{} {
+ consumedRecordsMx.Lock()
+ defer consumedRecordsMx.Unlock()
+ return slices.Clone(consumedRecords)
+ })
+
+ // We expect the last consumed offset to be tracked in a metric.
+ expectedConsumedOffset := run - 1
+ test.Poll(t, time.Second, nil, func() interface{} {
+ return promtest.GatherAndCompare(reg, strings.NewReader(fmt.Sprintf(`
+ # HELP cortex_ingest_storage_reader_last_consumed_offset The last offset successfully consumed by the partition reader. Set to -1 if not offset has been consumed yet.
+ # TYPE cortex_ingest_storage_reader_last_consumed_offset gauge
+ cortex_ingest_storage_reader_last_consumed_offset{partition="1"} %d
+ `, expectedConsumedOffset)), "cortex_ingest_storage_reader_last_consumed_offset")
+ })
+ })
+ }
+ })
+
+ t.Run("should not wait indefinitely if context is cancelled while fetching last produced offset", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ cluster, clusterAddr = testkafka.CreateCluster(t, partitionID+1, topicName)
+ consumer = consumerFunc(func(context.Context, []record) error { return nil })
+ listOffsetsRequestsCount = atomic.NewInt64(0)
+ )
+
+ // Mock Kafka to always fail the ListOffsets request.
+ cluster.ControlKey(int16(kmsg.ListOffsets), func(kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+
+ listOffsetsRequestsCount.Inc()
+ return nil, errors.New("mocked error"), true
+ })
+
+ // Create and start the reader.
+ reader := createReader(t, clusterAddr, topicName, partitionID, consumer, withMaxConsumerLagAtStartup(time.Second))
+
+ readerCtx, cancelReaderCtx := context.WithCancel(ctx)
+ require.NoError(t, reader.StartAsync(readerCtx))
+
+ // Wait until the Kafka cluster received at least 1 ListOffsets request.
+ test.Poll(t, 5*time.Second, true, func() interface{} {
+ return listOffsetsRequestsCount.Load() > 0
+ })
+
+ // Cancelling the context should cause the service to switch to a terminal state.
+ assert.Equal(t, services.Starting, reader.State())
+ cancelReaderCtx()
+
+ test.Poll(t, 5*time.Second, services.Failed, func() interface{} {
+ return reader.State()
+ })
+ })
+
+ t.Run("should not wait indefinitely if context is cancelled while fetching records", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ cluster, clusterAddr = testkafka.CreateCluster(t, partitionID+1, topicName)
+ consumer = consumerFunc(func(context.Context, []record) error { return nil })
+ fetchRequestsCount = atomic.NewInt64(0)
+ )
+
+ // Mock Kafka to always fail the Fetch request.
+ cluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+
+ fetchRequestsCount.Inc()
+ return nil, errors.New("mocked error"), true
+ })
+
+ // Produce some records.
+ writeClient := newKafkaProduceClient(t, clusterAddr)
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-1"))
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-2"))
+ t.Log("produced 2 records")
+
+ // Create and start the reader.
+ reader := createReader(t, clusterAddr, topicName, partitionID, consumer, withMaxConsumerLagAtStartup(time.Second))
+
+ readerCtx, cancelReaderCtx := context.WithCancel(ctx)
+ require.NoError(t, reader.StartAsync(readerCtx))
+
+ // Wait until the Kafka cluster received at least 1 Fetch request.
+ test.Poll(t, 5*time.Second, true, func() interface{} {
+ return fetchRequestsCount.Load() > 0
+ })
+
+ // Cancelling the context should cause the service to switch to a terminal state.
+ assert.Equal(t, services.Starting, reader.State())
+ cancelReaderCtx()
+
+ test.Poll(t, 5*time.Second, services.Failed, func() interface{} {
+ return reader.State()
+ })
+ })
+
+ t.Run("should not wait indefinitely if there are no records to consume from Kafka but partition start offset is > 0 (e.g. all previous records have been deleted by Kafka retention)", func(t *testing.T) {
+ t.Parallel()
+
+ for _, consumeFromPosition := range consumeFromPositionOptions {
+ consumeFromPosition := consumeFromPosition
+
+ t.Run(fmt.Sprintf("consume from position: %s", consumeFromPosition), func(t *testing.T) {
+ t.Parallel()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+
+ consumer := consumerFunc(func(context.Context, []record) error {
+ return nil
+ })
+
+ cluster, clusterAddr := testkafka.CreateCluster(t, partitionID+1, topicName)
+ cluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+
+ // Throttle the Fetch request.
+ select {
+ case <-ctx.Done():
+ case <-time.After(time.Second):
+ }
+
+ return nil, nil, false
+ })
+
+ // Produce some records.
+ writeClient := newKafkaProduceClient(t, clusterAddr)
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-1"))
+ produceRecord(ctx, t, writeClient, topicName, partitionID, []byte("record-2"))
+ t.Log("produced 2 records")
+
+ // Fetch the partition end offset, which is the offset of the next record that will be produced.
+ adminClient := kadm.NewClient(writeClient)
+ endOffsets, err := adminClient.ListEndOffsets(ctx, topicName)
+ require.NoError(t, err)
+ endOffset, exists := endOffsets.Lookup(topicName, partitionID)
+ require.True(t, exists)
+ require.NoError(t, endOffset.Err)
+ t.Logf("fetched partition end offset: %d", endOffset.Offset)
+
+ // Issue a request to delete the records produced so far. Under the hood, Kafka advances
+ // the partition start offset to the specified offset.
+ advancePartitionStartTo := kadm.Offsets{}
+ advancePartitionStartTo.Add(kadm.Offset{Topic: topicName, Partition: partitionID, At: endOffset.Offset})
+ _, err = adminClient.DeleteRecords(ctx, advancePartitionStartTo)
+ require.NoError(t, err)
+ t.Logf("advanced partition start offset to: %d", endOffset.Offset)
+
+ // Create and start the reader. We expect the reader to immediately switch to Running state.
+ reg := prometheus.NewPedanticRegistry()
+ reader := createReader(t, clusterAddr, topicName, partitionID, consumer,
+ withConsumeFromPositionAtStartup(consumeFromPosition),
+ withMaxConsumerLagAtStartup(time.Second),
+ withRegistry(reg))
+
+ require.NoError(t, services.StartAndAwaitRunning(ctx, reader))
+ t.Cleanup(func() {
+ require.NoError(t, services.StopAndAwaitTerminated(ctx, reader))
+ })
+
+ // We expect that no records have been consumed.
+ require.NoError(t, promtest.GatherAndCompare(reg, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_last_consumed_offset The last offset successfully consumed by the partition reader. Set to -1 if not offset has been consumed yet.
+ # TYPE cortex_ingest_storage_reader_last_consumed_offset gauge
+ cortex_ingest_storage_reader_last_consumed_offset{partition="1"} -1
+
+ # HELP cortex_ingest_storage_reader_last_committed_offset The last consumed offset successfully committed by the partition reader. Set to -1 if not offset has been committed yet.
+ # TYPE cortex_ingest_storage_reader_last_committed_offset gauge
+ cortex_ingest_storage_reader_last_committed_offset{partition="1"} -1
+ `), "cortex_ingest_storage_reader_last_consumed_offset", "cortex_ingest_storage_reader_last_committed_offset"))
+ })
+ }
+ })
+}
+
+func TestPartitionReader_fetchLastCommittedOffset(t *testing.T) {
+ const (
+ topicName = "test"
+ partitionID = 1
+ )
+
+ var (
+ ctx = context.Background()
+ )
+
+ t.Run("should return 'not exists' if Kafka returns GroupIDNotFound", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ cluster, clusterAddr = testkafka.CreateClusterWithoutCustomConsumerGroupsSupport(t, partitionID+1, topicName)
+ consumer = consumerFunc(func(context.Context, []record) error { return nil })
+ reader = createReader(t, clusterAddr, topicName, partitionID, consumer, withMaxConsumerLagAtStartup(time.Second))
+ )
+
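+ // Mock the OffsetFetch response so that Kafka returns GroupIDNotFound for the reader's consumer group.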
+ cluster.ControlKey(int16(kmsg.OffsetFetch), func(request kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+
+ req := request.(*kmsg.OffsetFetchRequest)
+ res := req.ResponseKind().(*kmsg.OffsetFetchResponse)
+ res.Default()
+ res.Groups = []kmsg.OffsetFetchResponseGroup{
+ {
+ Group: reader.consumerGroup,
+ ErrorCode: kerr.GroupIDNotFound.Code,
+ },
+ }
+
+ return res, nil, true
+ })
+
+ _, exists, err := reader.fetchLastCommittedOffset(ctx)
+ require.NoError(t, err)
+ assert.False(t, exists)
+ })
+
+ t.Run("should return 'not exists' if Kafka returns no offsets for the requested partition", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ cluster, clusterAddr = testkafka.CreateClusterWithoutCustomConsumerGroupsSupport(t, partitionID+1, topicName)
+ consumer = consumerFunc(func(context.Context, []record) error { return nil })
+ reader = createReader(t, clusterAddr, topicName, partitionID, consumer, withMaxConsumerLagAtStartup(time.Second))
+ )
+
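+ // Mock the OffsetFetch response so that Kafka returns a committed offset only for a different partition.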
+ cluster.ControlKey(int16(kmsg.OffsetFetch), func(request kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+
+ req := request.(*kmsg.OffsetFetchRequest)
+ res := req.ResponseKind().(*kmsg.OffsetFetchResponse)
+ res.Default()
+ res.Groups = []kmsg.OffsetFetchResponseGroup{
+ {
+ Group: reader.consumerGroup,
+ Topics: []kmsg.OffsetFetchResponseGroupTopic{
+ {
+ Topic: topicName,
+ Partitions: []kmsg.OffsetFetchResponseGroupTopicPartition{
+ {
+ Partition: partitionID + 1, // Another partition.
+ Offset: 456,
+ },
+ },
+ },
+ },
+ },
+ }
+
+ return res, nil, true
+ })
+
+ _, exists, err := reader.fetchLastCommittedOffset(ctx)
+ require.NoError(t, err)
+ assert.False(t, exists)
+ })
+
+ t.Run("should return the committed offset to Kafka", func(t *testing.T) {
+ t.Parallel()
+
+ var (
+ cluster, clusterAddr = testkafka.CreateClusterWithoutCustomConsumerGroupsSupport(t, partitionID+1, topicName)
+ consumer = consumerFunc(func(context.Context, []record) error { return nil })
+ reader = createReader(t, clusterAddr, topicName, partitionID, consumer, withMaxConsumerLagAtStartup(time.Second))
+ )
+
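+ // Mock the OffsetFetch response so that Kafka returns committed offsets for both our partition and another one.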
+ cluster.ControlKey(int16(kmsg.OffsetFetch), func(request kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+
+ req := request.(*kmsg.OffsetFetchRequest)
+ res := req.ResponseKind().(*kmsg.OffsetFetchResponse)
+ res.Default()
+ res.Groups = []kmsg.OffsetFetchResponseGroup{
+ {
+ Group: reader.consumerGroup,
+ Topics: []kmsg.OffsetFetchResponseGroupTopic{
+ {
+ Topic: topicName,
+ Partitions: []kmsg.OffsetFetchResponseGroupTopicPartition{
+ {
+ Partition: partitionID, // Our partition.
+ Offset: 123,
+ }, {
+ Partition: partitionID + 1, // Another partition.
+ Offset: 456,
+ },
+ },
+ },
+ },
+ },
+ }
+
+ return res, nil, true
+ })
+
+ offset, exists, err := reader.fetchLastCommittedOffset(ctx)
+ require.NoError(t, err)
+ assert.True(t, exists)
+ assert.Equal(t, int64(123), offset)
+ })
+}
+
+func TestPartitionCommitter(t *testing.T) {
+ t.Parallel()
+
+ const (
+ topicName = "test-topic"
+ consumerGroup = "test-group"
+ partitionID = 1
+ )
+
+ t.Run("should keep trying to commit offset if the last commit failed, even if the offset has not been incremented", func(t *testing.T) {
+ t.Parallel()
+
+ cluster, clusterAddr := testkafka.CreateClusterWithoutCustomConsumerGroupsSupport(t, partitionID+1, topicName)
+
+ // Mock the cluster to control OffsetCommit requests.
+ commitRequestsCount := atomic.NewInt64(0)
+ commitRequestsShouldFail := atomic.NewBool(true)
+
+ cluster.ControlKey(kmsg.OffsetCommit.Int16(), func(kreq kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+ commitRequestsCount.Inc()
+
+ res := kreq.ResponseKind().(*kmsg.OffsetCommitResponse)
+ res.Default()
+
+ if commitRequestsShouldFail.Load() {
+ res.Topics = []kmsg.OffsetCommitResponseTopic{
+ {Topic: topicName, Partitions: []kmsg.OffsetCommitResponseTopicPartition{{Partition: partitionID, ErrorCode: kerr.InvalidCommitOffsetSize.Code}}},
+ }
+ } else {
+ res.Topics = []kmsg.OffsetCommitResponseTopic{
+ {Topic: topicName, Partitions: []kmsg.OffsetCommitResponseTopicPartition{{Partition: partitionID}}},
+ }
+ }
+
+ return res, nil, true
+ })
+
+ logger := testutil.NewLogger(t)
+ cfg := createTestKafkaConfig(clusterAddr, topicName)
+ client, err := kgo.NewClient(commonKafkaClientOptions(cfg, nil, logger)...)
+ require.NoError(t, err)
+ t.Cleanup(client.Close)
+
+ adm := kadm.NewClient(client)
+ reg := prometheus.NewPedanticRegistry()
+
+ interval := time.Second
+ committer := newPartitionCommitter(cfg, adm, partitionID, consumerGroup, interval, logger, reg)
+ require.NoError(t, services.StartAndAwaitRunning(context.Background(), committer))
+ t.Cleanup(func() {
+ require.NoError(t, services.StopAndAwaitTerminated(context.Background(), committer))
+ })
+
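+ // Enqueue an offset: the committer will try to commit it at the next interval.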
+ committer.enqueueOffset(123)
+
+ // Since we mocked the Kafka cluster to fail the OffsetCommit requests, we wait until the
+ // first failure is tracked by the partition committer.
+ require.Eventually(t, func() bool {
+ return promtest.ToFloat64(committer.commitFailuresTotal) > 0
+ }, 5*time.Second, 10*time.Millisecond)
+
+ // At least 1 commit failed. Now we unblock it.
+ commitRequestsShouldFail.Store(false)
+
+ // Now we expect the commit to succeed, once the committer triggers the next commit at the next interval.
+ test.Poll(t, 10*interval, nil, func() interface{} {
+ return promtest.GatherAndCompare(reg, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_last_committed_offset The last consumed offset successfully committed by the partition reader. Set to -1 if not offset has been committed yet.
+ # TYPE cortex_ingest_storage_reader_last_committed_offset gauge
+ cortex_ingest_storage_reader_last_committed_offset{partition="1"} 123
+
+ # HELP cortex_ingest_storage_reader_offset_commit_failures_total Total number of failed requests to commit the last consumed offset.
+ # TYPE cortex_ingest_storage_reader_offset_commit_failures_total counter
+ cortex_ingest_storage_reader_offset_commit_failures_total{partition="1"} 1
+
+ # HELP cortex_ingest_storage_reader_offset_commit_requests_total Total number of requests issued to commit the last consumed offset (includes both successful and failed requests).
+ # TYPE cortex_ingest_storage_reader_offset_commit_requests_total counter
+ cortex_ingest_storage_reader_offset_commit_requests_total{partition="1"} 2
+ `),
+ "cortex_ingest_storage_reader_offset_commit_requests_total",
+ "cortex_ingest_storage_reader_offset_commit_failures_total",
+ "cortex_ingest_storage_reader_last_committed_offset")
+ })
+
+ // Since we haven't enqueued any other offset and the last enqueued one has been successfully committed,
+ // we expect the committer not to issue any further requests.
+ expectedRequestsCount := commitRequestsCount.Load()
+ time.Sleep(3 * interval)
+ assert.Equal(t, expectedRequestsCount, commitRequestsCount.Load())
+ })
+}
+
+func TestPartitionCommitter_commit(t *testing.T) {
+ t.Parallel()
+
+ const (
+ topicName = "test-topic"
+ consumerGroup = "test-group"
+ partitionID = 1
+ )
+
+ t.Run("should track metrics on successful commit", func(t *testing.T) {
+ t.Parallel()
+
+ _, clusterAddr := testkafka.CreateCluster(t, partitionID+1, topicName)
+
+ cfg := createTestKafkaConfig(clusterAddr, topicName)
+ client, err := kgo.NewClient(commonKafkaClientOptions(cfg, nil, log.NewNopLogger())...)
+ require.NoError(t, err)
+ t.Cleanup(client.Close)
+
+ adm := kadm.NewClient(client)
+ reg := prometheus.NewPedanticRegistry()
+ committer := newPartitionCommitter(cfg, adm, partitionID, consumerGroup, time.Second, log.NewNopLogger(), reg)
+
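+ // Synchronously commit offset 123 and expect it to succeed.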
+ require.NoError(t, committer.commit(context.Background(), 123))
+
+ assert.NoError(t, promtest.GatherAndCompare(reg, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_last_committed_offset The last consumed offset successfully committed by the partition reader. Set to -1 if not offset has been committed yet.
+ # TYPE cortex_ingest_storage_reader_last_committed_offset gauge
+ cortex_ingest_storage_reader_last_committed_offset{partition="1"} 123
+
+ # HELP cortex_ingest_storage_reader_offset_commit_failures_total Total number of failed requests to commit the last consumed offset.
+ # TYPE cortex_ingest_storage_reader_offset_commit_failures_total counter
+ cortex_ingest_storage_reader_offset_commit_failures_total{partition="1"} 0
+
+ # HELP cortex_ingest_storage_reader_offset_commit_requests_total Total number of requests issued to commit the last consumed offset (includes both successful and failed requests).
+ # TYPE cortex_ingest_storage_reader_offset_commit_requests_total counter
+ cortex_ingest_storage_reader_offset_commit_requests_total{partition="1"} 1
+ `),
+ "cortex_ingest_storage_reader_offset_commit_requests_total",
+ "cortex_ingest_storage_reader_offset_commit_failures_total",
+ "cortex_ingest_storage_reader_last_committed_offset"))
+ })
+
+ t.Run("should track metrics on failed commit", func(t *testing.T) {
+ t.Parallel()
+
+ cluster, clusterAddr := testkafka.CreateClusterWithoutCustomConsumerGroupsSupport(t, partitionID+1, topicName)
+
+ // Mock the cluster to fail any offset commit request.
+ cluster.ControlKey(kmsg.OffsetCommit.Int16(), func(kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.KeepControl()
+ return nil, errors.New("mocked error"), true
+ })
+
+ cfg := createTestKafkaConfig(clusterAddr, topicName)
+ client, err := kgo.NewClient(commonKafkaClientOptions(cfg, nil, log.NewNopLogger())...)
+ require.NoError(t, err)
+ t.Cleanup(client.Close)
+
+ adm := kadm.NewClient(client)
+ reg := prometheus.NewPedanticRegistry()
+ committer := newPartitionCommitter(cfg, adm, partitionID, consumerGroup, time.Second, log.NewNopLogger(), reg)
+
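+ // The commit is expected to fail because the mocked cluster rejects any OffsetCommit request.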
+ require.Error(t, committer.commit(context.Background(), 123))
+
+ assert.NoError(t, promtest.GatherAndCompare(reg, strings.NewReader(`
+ # HELP cortex_ingest_storage_reader_last_committed_offset The last consumed offset successfully committed by the partition reader. Set to -1 if not offset has been committed yet.
+ # TYPE cortex_ingest_storage_reader_last_committed_offset gauge
+ cortex_ingest_storage_reader_last_committed_offset{partition="1"} -1
+
+ # HELP cortex_ingest_storage_reader_offset_commit_failures_total Total number of failed requests to commit the last consumed offset.
+ # TYPE cortex_ingest_storage_reader_offset_commit_failures_total counter
+ cortex_ingest_storage_reader_offset_commit_failures_total{partition="1"} 1
+
+ # HELP cortex_ingest_storage_reader_offset_commit_requests_total Total number of requests issued to commit the last consumed offset (includes both successful and failed requests).
+ # TYPE cortex_ingest_storage_reader_offset_commit_requests_total counter
+ cortex_ingest_storage_reader_offset_commit_requests_total{partition="1"} 1
+ `),
+ "cortex_ingest_storage_reader_offset_commit_requests_total",
+ "cortex_ingest_storage_reader_offset_commit_failures_total",
+ "cortex_ingest_storage_reader_last_committed_offset"))
+ })
+}
+
func newKafkaProduceClient(t *testing.T, addrs string) *kgo.Client {
writeClient, err := kgo.NewClient(
kgo.SeedBrokers(addrs),
@@ -266,7 +1249,7 @@ type readerTestCfg struct {
kafka KafkaConfig
partitionID int32
consumer recordConsumer
- registry prometheus.Registerer
+ registry *prometheus.Registry
logger log.Logger
commitInterval time.Duration
}
@@ -285,16 +1268,28 @@ func withLastProducedOffsetPollInterval(i time.Duration) func(cfg *readerTestCfg
}
}
-func withRegistry(reg prometheus.Registerer) func(cfg *readerTestCfg) {
+func withMaxConsumerLagAtStartup(maxLag time.Duration) func(cfg *readerTestCfg) {
+ return func(cfg *readerTestCfg) {
+ cfg.kafka.MaxConsumerLagAtStartup = maxLag
+ }
+}
+
+func withConsumeFromPositionAtStartup(position string) func(cfg *readerTestCfg) {
+ return func(cfg *readerTestCfg) {
+ cfg.kafka.ConsumeFromPositionAtStartup = position
+ }
+}
+
+func withRegistry(reg *prometheus.Registry) func(cfg *readerTestCfg) {
return func(cfg *readerTestCfg) {
cfg.registry = reg
}
}
-func defaultReaderTestConfig(addr string, topicName string, partitionID int32, consumer recordConsumer) *readerTestCfg {
+func defaultReaderTestConfig(t *testing.T, addr string, topicName string, partitionID int32, consumer recordConsumer) *readerTestCfg {
return &readerTestCfg{
registry: prometheus.NewPedanticRegistry(),
- logger: log.NewNopLogger(),
+ logger: testutil.NewLogger(t),
kafka: createTestKafkaConfig(addr, topicName),
partitionID: partitionID,
consumer: consumer,
@@ -302,8 +1297,8 @@ func defaultReaderTestConfig(addr string, topicName string, partitionID int32, c
}
}
-func startReader(ctx context.Context, t *testing.T, addr string, topicName string, partitionID int32, consumer recordConsumer, opts ...readerTestCfgOtp) *PartitionReader {
- cfg := defaultReaderTestConfig(addr, topicName, partitionID, consumer)
+func createReader(t *testing.T, addr string, topicName string, partitionID int32, consumer recordConsumer, opts ...readerTestCfgOtp) *PartitionReader {
+ cfg := defaultReaderTestConfig(t, addr, topicName, partitionID, consumer)
for _, o := range opts {
o(cfg)
}
@@ -311,6 +1306,12 @@ func startReader(ctx context.Context, t *testing.T, addr string, topicName strin
require.NoError(t, err)
reader.commitInterval = cfg.commitInterval
+ return reader
+}
+
+func createAndStartReader(ctx context.Context, t *testing.T, addr string, topicName string, partitionID int32, consumer recordConsumer, opts ...readerTestCfgOtp) *PartitionReader {
+ reader := createReader(t, addr, topicName, partitionID, consumer, opts...)
+
require.NoError(t, services.StartAndAwaitRunning(ctx, reader))
t.Cleanup(func() {
require.NoError(t, services.StopAndAwaitTerminated(ctx, reader))
@@ -335,7 +1336,7 @@ func TestPartitionReader_Commit(t *testing.T) {
_, clusterAddr := testkafka.CreateCluster(t, partitionID+1, topicName)
consumer := newTestConsumer(3)
- reader := startReader(ctx, t, clusterAddr, topicName, partitionID, consumer, withCommitInterval(commitInterval))
+ reader := createAndStartReader(ctx, t, clusterAddr, topicName, partitionID, consumer, withCommitInterval(commitInterval))
produceRecord(ctx, t, newKafkaProduceClient(t, clusterAddr), topicName, partitionID, []byte("1"))
produceRecord(ctx, t, newKafkaProduceClient(t, clusterAddr), topicName, partitionID, []byte("2"))
@@ -349,7 +1350,7 @@ func TestPartitionReader_Commit(t *testing.T) {
recordsSentAfterShutdown := []byte("4")
produceRecord(ctx, t, newKafkaProduceClient(t, clusterAddr), topicName, partitionID, recordsSentAfterShutdown)
- startReader(ctx, t, clusterAddr, topicName, partitionID, consumer, withCommitInterval(commitInterval))
+ createAndStartReader(ctx, t, clusterAddr, topicName, partitionID, consumer, withCommitInterval(commitInterval))
records, err := consumer.waitRecords(1, time.Second, 0)
assert.NoError(t, err)
@@ -367,7 +1368,7 @@ func TestPartitionReader_Commit(t *testing.T) {
_, clusterAddr := testkafka.CreateCluster(t, partitionID+1, topicName)
consumer := newTestConsumer(4)
- reader := startReader(ctx, t, clusterAddr, topicName, partitionID, consumer, withCommitInterval(commitInterval))
+ reader := createAndStartReader(ctx, t, clusterAddr, topicName, partitionID, consumer, withCommitInterval(commitInterval))
produceRecord(ctx, t, newKafkaProduceClient(t, clusterAddr), topicName, partitionID, []byte("1"))
produceRecord(ctx, t, newKafkaProduceClient(t, clusterAddr), topicName, partitionID, []byte("2"))
@@ -378,7 +1379,7 @@ func TestPartitionReader_Commit(t *testing.T) {
require.NoError(t, services.StopAndAwaitTerminated(ctx, reader))
produceRecord(ctx, t, newKafkaProduceClient(t, clusterAddr), topicName, partitionID, []byte("4"))
- startReader(ctx, t, clusterAddr, topicName, partitionID, consumer, withCommitInterval(commitInterval))
+ createAndStartReader(ctx, t, clusterAddr, topicName, partitionID, consumer, withCommitInterval(commitInterval))
// There should be only one record - the one produced after the shutdown.
// The offset of record "3" should have been committed at shutdown and the reader should have resumed from there.
@@ -396,7 +1397,7 @@ func TestPartitionReader_Commit(t *testing.T) {
_, clusterAddr := testkafka.CreateCluster(t, partitionID+1, topicName)
consumer := newTestConsumer(4)
- reader := startReader(ctx, t, clusterAddr, topicName, partitionID, consumer, withCommitInterval(commitInterval))
+ reader := createAndStartReader(ctx, t, clusterAddr, topicName, partitionID, consumer, withCommitInterval(commitInterval))
produceRecord(ctx, t, newKafkaProduceClient(t, clusterAddr), topicName, partitionID, []byte("1"))
produceRecord(ctx, t, newKafkaProduceClient(t, clusterAddr), topicName, partitionID, []byte("2"))
@@ -406,7 +1407,7 @@ func TestPartitionReader_Commit(t *testing.T) {
require.NoError(t, err)
require.NoError(t, services.StopAndAwaitTerminated(ctx, reader))
- reader = startReader(ctx, t, clusterAddr, topicName, partitionID, consumer, withCommitInterval(commitInterval))
+ reader = createAndStartReader(ctx, t, clusterAddr, topicName, partitionID, consumer, withCommitInterval(commitInterval))
// No new records since the last commit.
_, err = consumer.waitRecords(0, time.Second, 0)
@@ -414,7 +1415,7 @@ func TestPartitionReader_Commit(t *testing.T) {
// Shut down without having consumed any records.
require.NoError(t, services.StopAndAwaitTerminated(ctx, reader))
- _ = startReader(ctx, t, clusterAddr, topicName, partitionID, consumer, withCommitInterval(commitInterval))
+ _ = createAndStartReader(ctx, t, clusterAddr, topicName, partitionID, consumer, withCommitInterval(commitInterval))
// No new records since the last commit (2 shutdowns ago).
_, err = consumer.waitRecords(0, time.Second, 0)
diff --git a/pkg/storage/ingest/util.go b/pkg/storage/ingest/util.go
index c662f9d9d0c..57be1907540 100644
--- a/pkg/storage/ingest/util.go
+++ b/pkg/storage/ingest/util.go
@@ -9,7 +9,9 @@ import (
"time"
"github.com/go-kit/log"
+ "github.com/grafana/dskit/middleware"
"github.com/grafana/regexp"
+ "github.com/pkg/errors"
"github.com/twmb/franz-go/pkg/kgo"
"github.com/twmb/franz-go/pkg/kmsg"
"github.com/twmb/franz-go/plugin/kprom"
@@ -18,16 +20,6 @@ import (
var (
// Regular expression used to parse the ingester numeric ID.
ingesterIDRegexp = regexp.MustCompile("-([0-9]+)$")
-
- // The Prometheus summary objectives used when tracking latency.
- latencySummaryObjectives = map[float64]float64{
- 0.5: 0.05,
- 0.90: 0.01,
- 0.99: 0.001,
- 0.995: 0.001,
- 0.999: 0.001,
- 1: 0.001,
- }
)
// IngesterPartitionID returns the partition ID owner the the given ingester.
@@ -47,7 +39,7 @@ func IngesterPartitionID(ingesterID string) (int32, error) {
}
func commonKafkaClientOptions(cfg KafkaConfig, metrics *kprom.Metrics, logger log.Logger) []kgo.Opt {
- return []kgo.Opt{
+ opts := []kgo.Opt{
kgo.ClientID(cfg.ClientID),
kgo.SeedBrokers(cfg.Address),
kgo.AllowAutoTopicCreation(),
@@ -74,7 +66,6 @@ func commonKafkaClientOptions(cfg KafkaConfig, metrics *kprom.Metrics, logger lo
kgo.MetadataMinAge(10 * time.Second),
kgo.MetadataMaxAge(10 * time.Second),
- kgo.WithHooks(metrics),
kgo.WithLogger(newKafkaLogger(logger)),
kgo.RetryTimeoutFn(func(key int16) time.Duration {
@@ -87,6 +78,12 @@ func commonKafkaClientOptions(cfg KafkaConfig, metrics *kprom.Metrics, logger lo
return 30 * time.Second
}),
}
+
+ if metrics != nil {
+ opts = append(opts, kgo.WithHooks(metrics))
+ }
+
+ return opts
}
// resultPromise is a simple utility to have multiple goroutines waiting for a result from another one.
@@ -121,3 +118,13 @@ func (w *resultPromise[T]) wait(ctx context.Context) (T, error) {
return w.resultValue, w.resultErr
}
}
+
+// shouldLog returns whether err should be logged.
+func shouldLog(ctx context.Context, err error) (bool, string) {
+ var optional middleware.OptionalLogging
+ if !errors.As(err, &optional) {
+ return true, ""
+ }
+
+ return optional.ShouldLog(ctx)
+}
diff --git a/pkg/storage/ingest/writer.go b/pkg/storage/ingest/writer.go
index f6bdff1155c..c98fb967a3b 100644
--- a/pkg/storage/ingest/writer.go
+++ b/pkg/storage/ingest/writer.go
@@ -41,7 +41,7 @@ type Writer struct {
writers map[int32]*kgo.Client
// Metrics.
- writeLatency prometheus.Summary
+ writeLatency prometheus.Histogram
writeBytesTotal prometheus.Counter
// The following settings can only be overridden in tests.
@@ -57,12 +57,13 @@ func NewWriter(kafkaCfg KafkaConfig, logger log.Logger, reg prometheus.Registere
maxInflightProduceRequests: 20,
// Metrics.
- writeLatency: promauto.With(reg).NewSummary(prometheus.SummaryOpts{
- Name: "cortex_ingest_storage_writer_latency_seconds",
- Help: "Latency to write an incoming request to the ingest storage.",
- Objectives: latencySummaryObjectives,
- MaxAge: time.Minute,
- AgeBuckets: 10,
+ writeLatency: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
+ Name: "cortex_ingest_storage_writer_latency_seconds",
+ Help: "Latency to write an incoming request to the ingest storage.",
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ NativeHistogramMaxBucketNumber: 100,
+ Buckets: prometheus.DefBuckets,
}),
writeBytesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "cortex_ingest_storage_writer_sent_bytes_total",
diff --git a/pkg/storage/ingest/writer_test.go b/pkg/storage/ingest/writer_test.go
index c1658e4183f..bee7e968782 100644
--- a/pkg/storage/ingest/writer_test.go
+++ b/pkg/storage/ingest/writer_test.go
@@ -11,7 +11,6 @@ import (
"testing"
"time"
- "github.com/go-kit/log"
"github.com/grafana/dskit/flagext"
"github.com/grafana/dskit/services"
"github.com/prometheus/client_golang/prometheus"
@@ -56,7 +55,7 @@ func TestWriter_WriteSync(t *testing.T) {
produceRequestProcessed := atomic.NewBool(false)
- cluster.ControlKey(int16(kmsg.Produce), func(request kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.ControlKey(int16(kmsg.Produce), func(kmsg.Request) (kmsg.Response, error, bool) {
// Add a delay, so that if WriteSync() will not wait then the test will fail.
time.Sleep(time.Second)
produceRequestProcessed.Store(true)
@@ -239,7 +238,7 @@ func TestWriter_WriteSync(t *testing.T) {
kafkaCfg := createTestKafkaConfig(clusterAddr, topicName)
writer, _ := createTestWriter(t, kafkaCfg)
- cluster.ControlKey(int16(kmsg.Produce), func(request kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.ControlKey(int16(kmsg.Produce), func(kmsg.Request) (kmsg.Response, error, bool) {
// Keep failing every request.
cluster.KeepControl()
return nil, errors.New("mock error"), true
@@ -268,7 +267,7 @@ func TestWriter_WriteSync(t *testing.T) {
)
wg.Add(1)
- cluster.ControlKey(int16(kmsg.Produce), func(request kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.ControlKey(int16(kmsg.Produce), func(kmsg.Request) (kmsg.Response, error, bool) {
// Ensure the test waits for this too, since the client request will fail earlier
// (if we don't wait, the test will end before this function and then goleak will
// report a goroutine leak).
@@ -396,8 +395,13 @@ func createTestWriter(t *testing.T, cfg KafkaConfig) (*Writer, prometheus.Gather
func createTestKafkaClient(t *testing.T, cfg KafkaConfig) *kgo.Client {
metrics := kprom.NewMetrics("", kprom.Registerer(prometheus.NewPedanticRegistry()))
+ opts := commonKafkaClientOptions(cfg, metrics, test.NewTestingLogger(t))
+
+ // Use the manual partitioner because the produceRecord() utility explicitly specifies
+ // the partition to write to in the kgo.Record itself.
+ opts = append(opts, kgo.RecordPartitioner(kgo.ManualPartitioner()))
- client, err := kgo.NewClient(commonKafkaClientOptions(cfg, metrics, log.NewNopLogger())...)
+ client, err := kgo.NewClient(opts...)
require.NoError(t, err)
// Automatically close it at the end of the test.
diff --git a/pkg/storage/tsdb/block/block.go b/pkg/storage/tsdb/block/block.go
index 8e97622e332..be8d04ee3ca 100644
--- a/pkg/storage/tsdb/block/block.go
+++ b/pkg/storage/tsdb/block/block.go
@@ -325,6 +325,16 @@ func GatherFileStats(blockDir string) (res []File, _ error) {
return res, err
}
+// GetMetaAttributes returns the object attributes of the meta.json file for the block associated with the given meta, read via the provided bucket reader.
+func GetMetaAttributes(ctx context.Context, meta *Meta, bucketReader objstore.BucketReader) (objstore.ObjectAttributes, error) {
+ metaPath := path.Join(meta.ULID.String(), MetaFilename)
+ attrs, err := bucketReader.Attributes(ctx, metaPath)
+ if err != nil {
+ return objstore.ObjectAttributes{}, errors.Wrapf(err, "unable to get object attributes for %s", metaPath)
+ }
+ return attrs, nil
+}
+
// MarkForNoCompact creates a file which marks block to be not compacted.
func MarkForNoCompact(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, reason NoCompactReason, details string, markedForNoCompact prometheus.Counter) error {
m := path.Join(id.String(), NoCompactMarkFilename)
diff --git a/pkg/storage/tsdb/block/block_test.go b/pkg/storage/tsdb/block/block_test.go
index 5527438a678..3078f2a7895 100644
--- a/pkg/storage/tsdb/block/block_test.go
+++ b/pkg/storage/tsdb/block/block_test.go
@@ -330,7 +330,7 @@ func TestMarkForDeletion(t *testing.T) {
}{
{
name: "block marked for deletion",
- preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {},
+ preUpload: func(testing.TB, ulid.ULID, objstore.Bucket) {},
blocksMarked: 1,
},
{
@@ -379,7 +379,7 @@ func TestMarkForNoCompact(t *testing.T) {
}{
{
name: "block marked",
- preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {},
+ preUpload: func(testing.TB, ulid.ULID, objstore.Bucket) {},
blocksMarked: 1,
},
{
@@ -440,7 +440,7 @@ func TestUnMarkForNoCompact(t *testing.T) {
},
},
"unmark non-existing block should fail": {
- setupTest: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {},
+ setupTest: func(testing.TB, ulid.ULID, objstore.Bucket) {},
expectedError: func(id ulid.ULID) error {
return errors.Errorf("deletion of no-compaction marker for block %s has failed: inmem: object not found", id.String())
},
diff --git a/pkg/storage/tsdb/block/fetcher.go b/pkg/storage/tsdb/block/fetcher.go
index 27c50f8809e..fad9a792155 100644
--- a/pkg/storage/tsdb/block/fetcher.go
+++ b/pkg/storage/tsdb/block/fetcher.go
@@ -194,6 +194,7 @@ func (f *MetaFetcher) loadMeta(ctx context.Context, id ulid.ULID) (*Meta, error)
//
// - The block upload is completed: this is the normal case. meta.json file still exists in the
// object storage and it's expected to match the locally cached one (because it's immutable by design).
+ //
// - The block has been marked for deletion: the deletion hasn't started yet, so the full block (including
// the meta.json file) is still in the object storage. This case is not different than the previous one.
//
@@ -417,7 +418,7 @@ func (f *MetaFetcher) Fetch(ctx context.Context) (metas map[ulid.ULID]*Meta, par
}
// FetchWithoutMarkedForDeletion returns all block metas as well as partial blocks (blocks without or with corrupted meta file) from the bucket.
-// This function excludes all blocks for deletion (no deletion delay applied).
+// This function excludes all blocks marked for deletion (no deletion delay applied).
// It's caller responsibility to not change the returned metadata files. Maps can be modified.
//
// Returned error indicates a failure in fetching metadata. Returned meta can be assumed as correct, with some blocks missing.
diff --git a/pkg/storage/tsdb/block/index_test.go b/pkg/storage/tsdb/block/index_test.go
index cf5cdddfd64..ca1d8efe9bd 100644
--- a/pkg/storage/tsdb/block/index_test.go
+++ b/pkg/storage/tsdb/block/index_test.go
@@ -63,7 +63,7 @@ func TestRewrite(t *testing.T) {
totalChunks := 0
ignoredChunks := 0
- require.NoError(t, rewrite(ctx, log.NewNopLogger(), ir, cr, iw, cw, m, []ignoreFnType{func(mint, maxt int64, prev *chunks.Meta, curr *chunks.Meta) (bool, error) {
+ require.NoError(t, rewrite(ctx, log.NewNopLogger(), ir, cr, iw, cw, m, []ignoreFnType{func(_, _ int64, _ *chunks.Meta, curr *chunks.Meta) (bool, error) {
totalChunks++
if curr.OverlapsClosedInterval(excludeTime, excludeTime) {
// Ignores all chunks that overlap with the excludeTime. excludeTime was randomly selected inside the block.
diff --git a/pkg/storage/tsdb/bucketindex/index.go b/pkg/storage/tsdb/bucketindex/index.go
index 16807d39c65..b864f731df1 100644
--- a/pkg/storage/tsdb/bucketindex/index.go
+++ b/pkg/storage/tsdb/bucketindex/index.go
@@ -7,6 +7,7 @@ package bucketindex
import (
"fmt"
+ "maps"
"path/filepath"
"strings"
"time"
@@ -97,6 +98,9 @@ type Block struct {
// Whether the block was from out of order samples
OutOfOrder bool `json:"out_of_order,omitempty"`
+
+ // Labels contains the external labels from the block's metadata.
+ Labels map[string]string `json:"labels,omitempty"`
}
// Within returns whether the block contains samples within the provided range.
@@ -118,6 +122,7 @@ func (m *Block) ThanosMeta() *block.Meta {
if m.OutOfOrder {
compactionHints = []string{tsdb.CompactionHintFromOutOfOrder}
}
+
return &block.Meta{
BlockMeta: tsdb.BlockMeta{
ULID: m.ID,
@@ -133,6 +138,7 @@ func (m *Block) ThanosMeta() *block.Meta {
Version: block.ThanosVersion1,
SegmentFiles: m.thanosMetaSegmentFiles(),
Source: block.SourceType(m.Source),
+ Labels: maps.Clone(m.Labels),
},
}
}
@@ -172,6 +178,7 @@ func BlockFromThanosMeta(meta block.Meta) *Block {
Source: string(meta.Thanos.Source),
CompactionLevel: meta.Compaction.Level,
OutOfOrder: meta.Compaction.FromOutOfOrder(),
+ Labels: maps.Clone(meta.Thanos.Labels),
}
}
diff --git a/pkg/storage/tsdb/bucketindex/index_test.go b/pkg/storage/tsdb/bucketindex/index_test.go
index 7d23ce93a29..82883a60f74 100644
--- a/pkg/storage/tsdb/bucketindex/index_test.go
+++ b/pkg/storage/tsdb/bucketindex/index_test.go
@@ -241,6 +241,10 @@ func TestBlockFromThanosMeta(t *testing.T) {
ID: blockID,
MinTime: 10,
MaxTime: 20,
+ Labels: map[string]string{
+ "a": "b",
+ "c": "d",
+ },
},
},
"meta.json with external labels, with compactor shard ID": {
@@ -263,6 +267,11 @@ func TestBlockFromThanosMeta(t *testing.T) {
MinTime: 10,
MaxTime: 20,
CompactorShardID: "10_of_20",
+ Labels: map[string]string{
+ "a": "b",
+ "c": "d",
+ mimir_tsdb.CompactorShardIDExternalLabel: "10_of_20",
+ },
},
},
"meta.json with external labels, with invalid shard ID": {
@@ -285,6 +294,11 @@ func TestBlockFromThanosMeta(t *testing.T) {
MinTime: 10,
MaxTime: 20,
CompactorShardID: "some weird value",
+ Labels: map[string]string{
+ "a": "b",
+ "c": "d",
+ mimir_tsdb.CompactorShardIDExternalLabel: "some weird value",
+ },
},
},
}
@@ -401,6 +415,28 @@ func TestBlock_ThanosMeta(t *testing.T) {
},
},
},
+ "block with labels": {
+ block: Block{
+ ID: blockID,
+ MinTime: 10,
+ MaxTime: 20,
+ SegmentsFormat: SegmentsFormatUnknown,
+ SegmentsNum: 0,
+ Labels: map[string]string{"my_key": "0x8413"},
+ },
+ expected: &block.Meta{
+ BlockMeta: tsdb.BlockMeta{
+ ULID: blockID,
+ MinTime: 10,
+ MaxTime: 20,
+ Version: block.TSDBVersion1,
+ },
+ Thanos: block.ThanosMeta{
+ Version: block.ThanosVersion1,
+ Labels: map[string]string{"my_key": "0x8413"},
+ },
+ },
+ },
}
for testName, testData := range tests {
diff --git a/pkg/storage/tsdb/bucketindex/updater_test.go b/pkg/storage/tsdb/bucketindex/updater_test.go
index 44a88e83af7..b1bfa39b5cb 100644
--- a/pkg/storage/tsdb/bucketindex/updater_test.go
+++ b/pkg/storage/tsdb/bucketindex/updater_test.go
@@ -8,6 +8,7 @@ package bucketindex
import (
"bytes"
"context"
+ "maps"
"path"
"testing"
"time"
@@ -188,11 +189,14 @@ func TestUpdater_UpdateIndexFromVersion1ToVersion2(t *testing.T) {
block1 := block.MockStorageBlockWithExtLabels(t, bkt, userID, 10, 20, map[string]string{mimir_tsdb.CompactorShardIDExternalLabel: "1_of_4"})
block2 := block.MockStorageBlockWithExtLabels(t, bkt, userID, 20, 30, map[string]string{mimir_tsdb.CompactorShardIDExternalLabel: "3_of_4"})
+ // Make copies of blocks without the compactor shard ID label.
block1WithoutCompactorShardID := block1
- block1WithoutCompactorShardID.Thanos.Labels = nil
+ block1WithoutCompactorShardID.Thanos.Labels = maps.Clone(block1.Thanos.Labels)
+ delete(block1WithoutCompactorShardID.Thanos.Labels, mimir_tsdb.CompactorShardIDExternalLabel)
block2WithoutCompactorShardID := block2
- block2WithoutCompactorShardID.Thanos.Labels = nil
+ block2WithoutCompactorShardID.Thanos.Labels = maps.Clone(block2.Thanos.Labels)
+ delete(block2WithoutCompactorShardID.Thanos.Labels, mimir_tsdb.CompactorShardIDExternalLabel)
// Double check that original block1 and block2 still have compactor shards set.
require.Equal(t, "1_of_4", block1.Thanos.Labels[mimir_tsdb.CompactorShardIDExternalLabel])
@@ -209,6 +213,7 @@ func TestUpdater_UpdateIndexFromVersion1ToVersion2(t *testing.T) {
// Now remove Compactor Shard ID from index.
for _, b := range returnedIdx.Blocks {
b.CompactorShardID = ""
+ delete(b.Labels, mimir_tsdb.CompactorShardIDExternalLabel)
}
// Try to update existing index. Since we didn't change the version, updater will reuse the index, and not update CompactorShardID field.
@@ -253,6 +258,7 @@ func assertBucketIndexEqual(t testing.TB, idx *Index, bkt objstore.Bucket, userI
Source: "test",
CompactionLevel: 1,
OutOfOrder: false,
+ Labels: b.Thanos.Labels,
})
}
diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go
index 76e6923cdfe..5a6bc1ceb9e 100644
--- a/pkg/storage/tsdb/config.go
+++ b/pkg/storage/tsdb/config.go
@@ -264,6 +264,9 @@ type TSDBConfig struct {
// HeadCompactionIntervalJitterEnabled is enabled by default, but allows to disable it in tests.
HeadCompactionIntervalJitterEnabled bool `yaml:"-"`
+ // HeadCompactionIntervalWhileStarting setting is hardcoded, but allowed to overwrite it in tests.
+ HeadCompactionIntervalWhileStarting time.Duration `yaml:"-"`
+
// TimelyHeadCompaction allows head compaction to happen when min block range can no longer be appended,
// without requiring 1.5x the chunk range worth of data in the head.
TimelyHeadCompaction bool `yaml:"timely_head_compaction_enabled" category:"experimental"`
@@ -314,6 +317,7 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) {
f.BoolVar(&cfg.TimelyHeadCompaction, "blocks-storage.tsdb.timely-head-compaction-enabled", false, "Allows head compaction to happen when the min block range can no longer be appended, without requiring 1.5x the chunk range worth of data in the head.")
cfg.HeadCompactionIntervalJitterEnabled = true
+ cfg.HeadCompactionIntervalWhileStarting = 30 * time.Second
}
// Validate the config.
diff --git a/pkg/storage/tsdb/config_test.go b/pkg/storage/tsdb/config_test.go
index 19d8ac3075c..591e22f6c73 100644
--- a/pkg/storage/tsdb/config_test.go
+++ b/pkg/storage/tsdb/config_test.go
@@ -25,98 +25,98 @@ func TestConfig_Validate(t *testing.T) {
expectedErr error
}{
"should pass on S3 backend": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.Bucket.Backend = "s3"
},
expectedErr: nil,
},
"should pass on GCS backend": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.Bucket.Backend = "gcs"
},
expectedErr: nil,
},
"should fail on unknown storage backend": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.Bucket.Backend = "unknown"
},
expectedErr: bucket.ErrUnsupportedStorageBackend,
},
"should fail on invalid ship concurrency": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.TSDB.ShipConcurrency = 0
},
expectedErr: errInvalidShipConcurrency,
},
"should pass on invalid ship concurrency but shipping is disabled": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.TSDB.ShipConcurrency = 0
cfg.TSDB.ShipInterval = 0
},
expectedErr: nil,
},
"should fail on invalid compaction interval": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.TSDB.HeadCompactionInterval = 0
},
expectedErr: errInvalidCompactionInterval,
},
"should fail on too high compaction interval": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.TSDB.HeadCompactionInterval = 20 * time.Minute
},
expectedErr: errInvalidCompactionInterval,
},
"should fail on invalid compaction concurrency": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.TSDB.HeadCompactionConcurrency = 0
},
expectedErr: errInvalidCompactionConcurrency,
},
"should pass on valid compaction concurrency": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.TSDB.HeadCompactionConcurrency = 10
},
expectedErr: nil,
},
"should fail on negative stripe size": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.TSDB.StripeSize = -2
},
expectedErr: errInvalidStripeSize,
},
"should fail on stripe size 0": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.TSDB.StripeSize = 0
},
expectedErr: errInvalidStripeSize,
},
"should fail on stripe size 1": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.TSDB.StripeSize = 1
},
expectedErr: errInvalidStripeSize,
},
"should pass on valid stripe size": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.TSDB.StripeSize = 1 << 14
},
expectedErr: nil,
},
"should fail on empty block ranges": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.TSDB.BlockRanges = nil
},
expectedErr: errEmptyBlockranges,
},
"should fail on invalid TSDB WAL segment size": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.TSDB.WALSegmentSizeBytes = 0
},
expectedErr: errInvalidWALSegmentSizeBytes,
},
"should fail on invalid store-gateway streaming batch size": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.BucketStore.StreamingBatchSize = 0
},
expectedErr: errInvalidStreamingBatchSize,
@@ -129,7 +129,7 @@ func TestConfig_Validate(t *testing.T) {
expectedErr: errEarlyCompactionRequiresActiveSeries,
},
"should fail on invalid forced compaction min series reduction percentage": {
- setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) {
+ setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) {
cfg.TSDB.EarlyHeadCompactionMinEstimatedSeriesReductionPercentage = 101
},
expectedErr: errInvalidEarlyHeadCompactionMinSeriesReduction,
diff --git a/pkg/storage/tsdb/users_scanner_test.go b/pkg/storage/tsdb/users_scanner_test.go
index cc00a5ec87d..0af741d7778 100644
--- a/pkg/storage/tsdb/users_scanner_test.go
+++ b/pkg/storage/tsdb/users_scanner_test.go
@@ -43,7 +43,7 @@ func TestUsersScanner_ScanUsers_ShouldReturnUsersForWhichOwnerCheckOrTenantDelet
bucketClient.MockExists(path.Join("user-1", TenantDeletionMarkPath), false, nil)
bucketClient.MockExists(path.Join("user-2", TenantDeletionMarkPath), false, errors.New("fail"))
- isOwned := func(userID string) (bool, error) {
+ isOwned := func(string) (bool, error) {
return false, errors.New("failed to check if user is owned")
}
diff --git a/pkg/storegateway/bucket_chunk_reader_test.go b/pkg/storegateway/bucket_chunk_reader_test.go
index 8951228b22f..03bce0fc253 100644
--- a/pkg/storegateway/bucket_chunk_reader_test.go
+++ b/pkg/storegateway/bucket_chunk_reader_test.go
@@ -54,12 +54,12 @@ func TestBucketChunkReader_refetchChunks(t *testing.T) {
// Each func takes the estimated length and returns a new length.
chunkLengthSkewingFuncs := map[string]func(uint32) uint32{
- "tsdb.EstimatedMaxChunkSize": func(chunkLength uint32) uint32 { return tsdb.EstimatedMaxChunkSize },
- "10xtsdb.EstimatedMaxChunkSize": func(chunkLength uint32) uint32 { return 10 * tsdb.EstimatedMaxChunkSize },
+ "tsdb.EstimatedMaxChunkSize": func(uint32) uint32 { return tsdb.EstimatedMaxChunkSize },
+ "10xtsdb.EstimatedMaxChunkSize": func(uint32) uint32 { return 10 * tsdb.EstimatedMaxChunkSize },
"size-1": func(chunkLength uint32) uint32 { return chunkLength - 1 },
"size/2": func(chunkLength uint32) uint32 { return chunkLength / 2 },
- "1": func(chunkLength uint32) uint32 { return 1 },
- "0": func(chunkLength uint32) uint32 { return 0 },
+ "1": func(uint32) uint32 { return 1 },
+ "0": func(uint32) uint32 { return 0 },
}
for name, skewChunkLen := range chunkLengthSkewingFuncs {
diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go
index e66b23d0566..e98a41ad843 100644
--- a/pkg/storegateway/bucket_stores_test.go
+++ b/pkg/storegateway/bucket_stores_test.go
@@ -323,7 +323,7 @@ func TestBucketStores_syncUsersBlocks(t *testing.T) {
// Sync user stores and count the number of times the callback is called.
var storesCount atomic.Int32
- err = stores.syncUsersBlocks(context.Background(), func(ctx context.Context, bs *BucketStore) error {
+ err = stores.syncUsersBlocks(context.Background(), func(context.Context, *BucketStore) error {
storesCount.Inc()
return nil
})
diff --git a/pkg/storegateway/bucket_test.go b/pkg/storegateway/bucket_test.go
index c68bb012d6a..1d89be74739 100644
--- a/pkg/storegateway/bucket_test.go
+++ b/pkg/storegateway/bucket_test.go
@@ -340,7 +340,7 @@ func TestBlockLabelNames(t *testing.T) {
onLabelNamesCalled: func() error {
return fmt.Errorf("not expected the LabelNames() calls with matchers")
},
- onLabelValuesOffsetsCalled: func(name string) error {
+ onLabelValuesOffsetsCalled: func(string) error {
expectedCalls--
if expectedCalls < 0 {
return fmt.Errorf("didn't expect another index.Reader.LabelValues() call")
@@ -392,7 +392,7 @@ func TestBlockLabelValues(t *testing.T) {
b := newTestBucketBlock()
b.indexHeaderReader = &interceptedIndexReader{
Reader: b.indexHeaderReader,
- onLabelValuesOffsetsCalled: func(name string) error { return context.DeadlineExceeded },
+ onLabelValuesOffsetsCalled: func(string) error { return context.DeadlineExceeded },
}
b.indexCache = cacheNotExpectingToStoreLabelValues{t: t}
@@ -405,7 +405,7 @@ func TestBlockLabelValues(t *testing.T) {
b := newTestBucketBlock()
b.indexHeaderReader = &interceptedIndexReader{
Reader: b.indexHeaderReader,
- onLabelValuesOffsetsCalled: func(name string) error {
+ onLabelValuesOffsetsCalled: func(string) error {
expectedCalls--
if expectedCalls < 0 {
return fmt.Errorf("didn't expect another index.Reader.LabelValues() call")
@@ -1904,7 +1904,7 @@ func TestBucketStore_Series_RequestAndResponseHints(t *testing.T) {
tb, store, seriesSet1, seriesSet2, block1, block2, cleanup := setupStoreForHintsTest(t, 5000)
tb.Cleanup(cleanup)
for _, streamingBatchSize := range []int{0, 1, 5} {
- t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(t *testing.T) {
+ t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(*testing.T) {
runTestServerSeries(tb, store, streamingBatchSize, newTestCases(seriesSet1, seriesSet2, block1, block2)...)
})
}
diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go
index c7d39710630..27a0e5fe055 100644
--- a/pkg/storegateway/gateway_test.go
+++ b/pkg/storegateway/gateway_test.go
@@ -63,17 +63,17 @@ func TestConfig_Validate(t *testing.T) {
expected error
}{
"should pass by default": {
- setup: func(cfg *Config, limits *validation.Limits) {},
+ setup: func(*Config, *validation.Limits) {},
expected: nil,
},
"should fail if shard size is negative": {
- setup: func(cfg *Config, limits *validation.Limits) {
+ setup: func(_ *Config, limits *validation.Limits) {
limits.StoreGatewayTenantShardSize = -3
},
expected: errInvalidTenantShardSize,
},
"should pass if shard size has been set": {
- setup: func(cfg *Config, limits *validation.Limits) {
+ setup: func(_ *Config, limits *validation.Limits) {
limits.StoreGatewayTenantShardSize = 3
},
expected: nil,
@@ -193,7 +193,7 @@ func TestStoreGateway_InitialSyncFailure(t *testing.T) {
ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil)
t.Cleanup(func() { assert.NoError(t, closer.Close()) })
- bucketClient := &bucket.ErrorInjectedBucketClient{Injector: func(operation bucket.Operation, s string) error { return assert.AnError }}
+ bucketClient := &bucket.ErrorInjectedBucketClient{Injector: func(bucket.Operation, string) error { return assert.AnError }}
g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), log.NewLogfmtLogger(os.Stdout), nil, nil)
require.NoError(t, err)
diff --git a/pkg/storegateway/indexcache/remote_test.go b/pkg/storegateway/indexcache/remote_test.go
index c4a89e3b201..a9d695c14d1 100644
--- a/pkg/storegateway/indexcache/remote_test.go
+++ b/pkg/storegateway/indexcache/remote_test.go
@@ -215,7 +215,7 @@ func TestRemoteIndexCache_FetchMultiSeriesForRef(t *testing.T) {
mockedErr error
fetchUserID string
fetchBlockID ulid.ULID
- fetchIds []storage.SeriesRef
+ fetchIDs []storage.SeriesRef
expectedHits map[storage.SeriesRef][]byte
expectedMisses []storage.SeriesRef
}{
@@ -223,7 +223,7 @@ func TestRemoteIndexCache_FetchMultiSeriesForRef(t *testing.T) {
setup: []mockedSeriesForRef{},
fetchUserID: user1,
fetchBlockID: block1,
- fetchIds: []storage.SeriesRef{1, 2},
+ fetchIDs: []storage.SeriesRef{1, 2},
expectedHits: nil,
expectedMisses: []storage.SeriesRef{1, 2},
},
@@ -237,7 +237,7 @@ func TestRemoteIndexCache_FetchMultiSeriesForRef(t *testing.T) {
},
fetchUserID: user1,
fetchBlockID: block1,
- fetchIds: []storage.SeriesRef{1, 2},
+ fetchIDs: []storage.SeriesRef{1, 2},
expectedHits: map[storage.SeriesRef][]byte{
1: value1,
2: value2,
@@ -251,7 +251,7 @@ func TestRemoteIndexCache_FetchMultiSeriesForRef(t *testing.T) {
},
fetchUserID: user1,
fetchBlockID: block1,
- fetchIds: []storage.SeriesRef{1, 2},
+ fetchIDs: []storage.SeriesRef{1, 2},
expectedHits: map[storage.SeriesRef][]byte{1: value1},
expectedMisses: []storage.SeriesRef{2},
},
@@ -264,7 +264,7 @@ func TestRemoteIndexCache_FetchMultiSeriesForRef(t *testing.T) {
mockedErr: errors.New("mocked error"),
fetchUserID: user1,
fetchBlockID: block1,
- fetchIds: []storage.SeriesRef{1, 2},
+ fetchIDs: []storage.SeriesRef{1, 2},
expectedHits: nil,
expectedMisses: []storage.SeriesRef{1, 2},
},
@@ -283,12 +283,12 @@ func TestRemoteIndexCache_FetchMultiSeriesForRef(t *testing.T) {
}
// Fetch series from cached and assert on it.
- hits, misses := c.FetchMultiSeriesForRefs(ctx, testData.fetchUserID, testData.fetchBlockID, testData.fetchIds)
+ hits, misses := c.FetchMultiSeriesForRefs(ctx, testData.fetchUserID, testData.fetchBlockID, testData.fetchIDs)
assert.Equal(t, testData.expectedHits, hits)
assert.Equal(t, testData.expectedMisses, misses)
// Assert on metrics.
- assert.Equal(t, float64(len(testData.fetchIds)), prom_testutil.ToFloat64(c.requests.WithLabelValues(cacheTypeSeriesForRef)))
+ assert.Equal(t, float64(len(testData.fetchIDs)), prom_testutil.ToFloat64(c.requests.WithLabelValues(cacheTypeSeriesForRef)))
assert.Equal(t, float64(len(testData.expectedHits)), prom_testutil.ToFloat64(c.hits.WithLabelValues(cacheTypeSeriesForRef)))
for _, typ := range remove(allCacheTypes, cacheTypeSeriesForRef) {
assert.Equal(t, 0.0, prom_testutil.ToFloat64(c.requests.WithLabelValues(typ)))
diff --git a/pkg/storegateway/indexheader/lazy_binary_reader_test.go b/pkg/storegateway/indexheader/lazy_binary_reader_test.go
index 93cd0b3b7ae..785f633ef69 100644
--- a/pkg/storegateway/indexheader/lazy_binary_reader_test.go
+++ b/pkg/storegateway/indexheader/lazy_binary_reader_test.go
@@ -34,7 +34,7 @@ func TestNewLazyBinaryReader_ShouldFailIfUnableToBuildIndexHeader(t *testing.T)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, bkt.Close()) })
- testLazyBinaryReader(t, bkt, tmpDir, ulid.ULID{}, func(t *testing.T, r *LazyBinaryReader, err error) {
+ testLazyBinaryReader(t, bkt, tmpDir, ulid.ULID{}, func(t *testing.T, _ *LazyBinaryReader, err error) {
require.Error(t, err)
})
}
diff --git a/pkg/storegateway/indexheader/reader_benchmarks_test.go b/pkg/storegateway/indexheader/reader_benchmarks_test.go
index 0f03cc877a8..2fbfe210f03 100644
--- a/pkg/storegateway/indexheader/reader_benchmarks_test.go
+++ b/pkg/storegateway/indexheader/reader_benchmarks_test.go
@@ -177,7 +177,7 @@ func BenchmarkLabelValuesOffsetsIndexV1(b *testing.B) {
for i := 0; i < b.N; i++ {
name := names[i%len(names)]
- values, err := br.LabelValuesOffsets(name, "", func(s string) bool {
+ values, err := br.LabelValuesOffsets(name, "", func(string) bool {
return true
})
@@ -221,7 +221,7 @@ func BenchmarkLabelValuesOffsetsIndexV2(b *testing.B) {
for i := 0; i < b.N; i++ {
name := names[i%len(names)]
- values, err := br.LabelValuesOffsets(name, "", func(s string) bool {
+ values, err := br.LabelValuesOffsets(name, "", func(string) bool {
return true
})
diff --git a/pkg/storegateway/indexheader/stream_binary_reader.go b/pkg/storegateway/indexheader/stream_binary_reader.go
index fabc023105c..de08bc4200c 100644
--- a/pkg/storegateway/indexheader/stream_binary_reader.go
+++ b/pkg/storegateway/indexheader/stream_binary_reader.go
@@ -190,7 +190,7 @@ func (r *StreamBinaryReader) loadFromSparseIndexHeader(logger *spanlogger.SpanLo
level.Info(logger).Log("msg", "loaded sparse index-header from disk", "id", id, "path", sparseHeadersPath, "elapsed", time.Since(start))
}()
- level.Info(logger).Log("msg", "loading from sparse index-header from disk", "id", id, "path", sparseHeadersPath)
+ level.Info(logger).Log("msg", "loading sparse index-header from disk", "id", id, "path", sparseHeadersPath)
sparseHeaders := &indexheaderpb.Sparse{}
gzipped := bytes.NewReader(sparseData)
diff --git a/pkg/storegateway/series_refs_test.go b/pkg/storegateway/series_refs_test.go
index 5ce1f159736..ee5070e6cfd 100644
--- a/pkg/storegateway/series_refs_test.go
+++ b/pkg/storegateway/series_refs_test.go
@@ -1489,7 +1489,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
- newTestBlock := prepareTestBlock(test.NewTB(t), func(tb testing.TB, appenderFactory func() storage.Appender) {
+ newTestBlock := prepareTestBlock(test.NewTB(t), func(_ testing.TB, appenderFactory func() storage.Appender) {
const (
samplesFor1Chunk = 100 // not a complete chunk
samplesFor2Chunks = samplesFor1Chunk * 2 // not a complete chunk
@@ -2008,10 +2008,10 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) {
for ts := int64(0); ts < 10; ts++ {
for _, s := range existingSeries {
_, err := appender.Append(0, s, ts, 0)
- assert.NoError(t, err)
+ assert.NoError(tb, err)
}
}
- assert.NoError(t, appender.Commit())
+ assert.NoError(tb, appender.Commit())
})
mockedSeriesHashes := map[string]uint64{
diff --git a/pkg/storegateway/sharding_strategy_test.go b/pkg/storegateway/sharding_strategy_test.go
index 063855f7cdd..28207b8d118 100644
--- a/pkg/storegateway/sharding_strategy_test.go
+++ b/pkg/storegateway/sharding_strategy_test.go
@@ -363,7 +363,7 @@ func TestShuffleShardingStrategy(t *testing.T) {
t.Cleanup(func() { assert.NoError(t, closer.Close()) })
// Initialize the ring state.
- require.NoError(t, store.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) {
+ require.NoError(t, store.CAS(ctx, "test", func(interface{}) (interface{}, bool, error) {
d := ring.NewDesc()
testData.setupRing(d)
return d, true, nil
diff --git a/pkg/usagestats/seed_test.go b/pkg/usagestats/seed_test.go
index ad6a2902cce..80c5647092a 100644
--- a/pkg/usagestats/seed_test.go
+++ b/pkg/usagestats/seed_test.go
@@ -127,14 +127,14 @@ func TestWaitSeedFileStability(t *testing.T) {
}
tests := map[string]func(t *testing.T, bucketClient *bucket.ClientMock) testExpectations{
- "should immediately return if seed file does not exist": func(t *testing.T, bucketClient *bucket.ClientMock) testExpectations {
+ "should immediately return if seed file does not exist": func(_ *testing.T, bucketClient *bucket.ClientMock) testExpectations {
bucketClient.MockGet(ClusterSeedFileName, "", bucket.ErrObjectDoesNotExist)
return testExpectations{
expectedErr: bucket.ErrObjectDoesNotExist,
}
},
- "should immediately return if seed file is corrupted": func(t *testing.T, bucketClient *bucket.ClientMock) testExpectations {
+ "should immediately return if seed file is corrupted": func(_ *testing.T, bucketClient *bucket.ClientMock) testExpectations {
bucketClient.MockGet(ClusterSeedFileName, "xxx", nil)
return testExpectations{
@@ -224,7 +224,7 @@ func TestInitSeedFile(t *testing.T) {
expectedMinDuration: minStability,
}
},
- "should create the seed file if doesn't exist and then wait for 'min stability'": func(t *testing.T, bucketClient objstore.Bucket) testExpectations {
+ "should create the seed file if doesn't exist and then wait for 'min stability'": func(*testing.T, objstore.Bucket) testExpectations {
return testExpectations{
expectedMinDuration: minStability,
}
diff --git a/pkg/util/fieldcategory/overrides.go b/pkg/util/configdoc/fieldcategory.go
similarity index 87%
rename from pkg/util/fieldcategory/overrides.go
rename to pkg/util/configdoc/fieldcategory.go
index 9e5af1a77c8..d3f9eb83fca 100644
--- a/pkg/util/fieldcategory/overrides.go
+++ b/pkg/util/configdoc/fieldcategory.go
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: AGPL-3.0-only
-package fieldcategory
+package configdoc
import "fmt"
@@ -34,8 +34,8 @@ func (c Category) String() string {
// Fields are primarily categorized via struct tags, but this can be impossible when third party libraries are involved
// Only categorize fields here when you can't otherwise, since struct tags are less likely to become stale
-var overrides = map[string]Category{
- // weaveworks/common/server in server.Config
+var categoryOverrides = map[string]Category{
+ // Defined in grafana/dskit/server.Config
"server.graceful-shutdown-timeout": Advanced,
"server.grpc-conn-limit": Advanced,
"server.grpc-listen-network": Advanced,
@@ -69,25 +69,26 @@ var overrides = map[string]Category{
"server.path-prefix": Advanced,
"server.register-instrumentation": Advanced,
"server.log-request-at-info-level-enabled": Advanced,
+ "server.proxy-protocol-enabled": Experimental,
// main.go global flags
"config.file": Basic,
"config.expand-env": Basic,
}
-func AddOverrides(o map[string]Category) {
+func AddCategoryOverrides(o map[string]Category) {
for n, c := range o {
- overrides[n] = c
+ categoryOverrides[n] = c
}
}
-func GetOverride(fieldName string) (category Category, ok bool) {
- category, ok = overrides[fieldName]
+func GetCategoryOverride(fieldName string) (category Category, ok bool) {
+ category, ok = categoryOverrides[fieldName]
return
}
-func VisitOverrides(f func(name string)) {
- for override := range overrides {
+func VisitCategoryOverrides(f func(name string)) {
+ for override := range categoryOverrides {
f(override)
}
}
diff --git a/pkg/util/configdoc/hidden.go b/pkg/util/configdoc/hidden.go
new file mode 100644
index 00000000000..f74a5ee8659
--- /dev/null
+++ b/pkg/util/configdoc/hidden.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+
+package configdoc
+
+var hiddenOverrides = map[string]bool{
+ // Defined in grafana/dskit/server.Config
+ "server.grpc.stats-tracking-enabled": true,
+ "server.grpc.recv-buffer-pools-enabled": true,
+}
+
+func GetHiddenOverride(fieldName string) (isHidden, ok bool) {
+ isHidden, ok = hiddenOverrides[fieldName]
+ return
+}
diff --git a/pkg/util/flags_test.go b/pkg/util/flags_test.go
index d214b316195..bbcf2c1b592 100644
--- a/pkg/util/flags_test.go
+++ b/pkg/util/flags_test.go
@@ -19,7 +19,7 @@ func TestTrackRegisteredFlags(t *testing.T) {
var previous, registered, nonPrefixed string
fs.StringVar(&previous, "previous.flag", "previous", "")
- rf := TrackRegisteredFlags(prefix, fs, func(prefix string, f *flag.FlagSet) {
+ rf := TrackRegisteredFlags(prefix, fs, func(prefix string, _ *flag.FlagSet) {
fs.StringVar(®istered, prefix+flagName, "registered", "")
fs.StringVar(&nonPrefixed, flagName, "non-prefixed", "")
})
diff --git a/pkg/util/globalerror/errors.go b/pkg/util/globalerror/errors.go
index 2b26569324c..1a35681272f 100644
--- a/pkg/util/globalerror/errors.go
+++ b/pkg/util/globalerror/errors.go
@@ -72,6 +72,14 @@ const (
BucketIndexTooOld ID = "bucket-index-too-old"
DistributorMaxWriteMessageSize ID = "distributor-max-write-message-size"
+
+ // Map Prometheus TSDB native histogram validation errors to Mimir errors.
+ // E.g. histogram.ErrHistogramCountNotBigEnough -> NativeHistogramCountNotBigEnough
+ NativeHistogramCountMismatch ID = "native-histogram-count-mismatch"
+ NativeHistogramCountNotBigEnough ID = "native-histogram-count-not-big-enough"
+ NativeHistogramNegativeBucketCount ID = "native-histogram-negative-bucket-count"
+ NativeHistogramSpanNegativeOffset ID = "native-histogram-span-negative-offset"
+ NativeHistogramSpansBucketsMismatch ID = "native-histogram-spans-buckets-mismatch"
)
// Message returns the provided msg, appending the error id.
diff --git a/pkg/util/grpc_test.go b/pkg/util/grpc_test.go
index 1dca9a0c93a..01431fd151e 100644
--- a/pkg/util/grpc_test.go
+++ b/pkg/util/grpc_test.go
@@ -3,15 +3,20 @@
package util
import (
+ "context"
"errors"
"io"
"testing"
"time"
"github.com/stretchr/testify/require"
+
+ "github.com/grafana/mimir/pkg/util/test"
)
func TestCloseAndExhaust(t *testing.T) {
+ test.VerifyNoLeak(t)
+
t.Run("CloseSend returns an error", func(t *testing.T) {
expectedErr := errors.New("something went wrong")
stream := &mockStream{closeSendError: expectedErr}
@@ -33,7 +38,10 @@ func TestCloseAndExhaust(t *testing.T) {
})
t.Run("Recv blocks forever", func(t *testing.T) {
- stream := &mockStream{}
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+
+ stream := &mockStream{recvCtx: ctx}
returned := make(chan error)
go func() {
@@ -51,6 +59,7 @@ func TestCloseAndExhaust(t *testing.T) {
type mockStream struct {
closeSendError error
+ recvCtx context.Context
recvErrors []error
}
@@ -60,7 +69,12 @@ func (m *mockStream) CloseSend() error {
func (m *mockStream) Recv() (string, error) {
if len(m.recvErrors) == 0 {
- // Block forever.
+ // Block forever, unless the context is canceled (if provided).
+ if m.recvCtx != nil {
+ <-m.recvCtx.Done()
+ return "", m.recvCtx.Err()
+ }
+
<-make(chan struct{})
}
diff --git a/pkg/util/gziphandler/gzip_test.go b/pkg/util/gziphandler/gzip_test.go
index cdc374e1355..fbacaf42d61 100644
--- a/pkg/util/gziphandler/gzip_test.go
+++ b/pkg/util/gziphandler/gzip_test.go
@@ -157,7 +157,7 @@ func TestGzipHandlerAlreadyCompressed(t *testing.T) {
}
func TestNewGzipLevelHandler(t *testing.T) {
- handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
_, _ = io.WriteString(w, testBody)
})
@@ -216,7 +216,7 @@ func TestGzipHandlerNoBody(t *testing.T) {
}
for num, test := range tests {
- handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(test.statusCode)
if test.body != nil {
_, _ = w.Write(test.body)
@@ -284,7 +284,7 @@ func TestGzipHandlerContentLength(t *testing.T) {
go func() { _ = srv.Serve(ln) }()
for num, test := range tests {
- srv.Handler = GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ srv.Handler = GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
if test.bodyLen > 0 {
w.Header().Set("Content-Length", strconv.Itoa(test.bodyLen))
}
@@ -336,13 +336,13 @@ func TestGzipHandlerMinSize(t *testing.T) {
wrapper, _ := NewGzipLevelAndMinSize(gzip.DefaultCompression, 128)
handler := wrapper(http.HandlerFunc(
- func(w http.ResponseWriter, r *http.Request) {
+ func(w http.ResponseWriter, _ *http.Request) {
// Write responses one byte at a time to ensure that the flush
// mechanism, if used, is working properly.
for i := 0; i < responseLength; i++ {
n, err := w.Write(b)
assert.Equal(t, 1, n)
- assert.Nil(t, err)
+ assert.NoError(t, err)
}
},
))
@@ -372,7 +372,7 @@ func TestGzipDoubleClose(t *testing.T) {
// aren't added back by double close
addLevelPool(gzip.DefaultCompression)
- handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
// call close here and it'll get called again interally by
// NewGzipLevelHandler's handler defer
_, _ = w.Write([]byte("test"))
@@ -406,7 +406,7 @@ func (w *panicOnSecondWriteHeaderWriter) WriteHeader(s int) {
}
func TestGzipHandlerDoubleWriteHeader(t *testing.T) {
- handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Length", "15000")
// Specifically write the header here
w.WriteHeader(304)
@@ -459,7 +459,7 @@ func TestStatusCodes(t *testing.T) {
func TestFlushBeforeWrite(t *testing.T) {
b := []byte(testBody)
- handler := GzipHandler(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+ handler := GzipHandler(http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) {
rw.WriteHeader(http.StatusNotFound)
rw.(http.Flusher).Flush()
_, _ = rw.Write(b)
@@ -478,14 +478,14 @@ func TestFlushBeforeWrite(t *testing.T) {
func TestImplementFlusher(t *testing.T) {
request := httptest.NewRequest(http.MethodGet, "/", nil)
request.Header.Set(acceptEncoding, "gzip")
- GzipHandler(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+ GzipHandler(http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) {
_, okFlusher := rw.(http.Flusher)
assert.True(t, okFlusher, "response writer must implement http.Flusher")
})).ServeHTTP(httptest.NewRecorder(), request)
}
func TestIgnoreSubsequentWriteHeader(t *testing.T) {
- handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(500)
w.WriteHeader(404)
}))
@@ -505,7 +505,7 @@ func TestDontWriteWhenNotWrittenTo(t *testing.T) {
// ensure the gzip middleware doesn't touch the actual ResponseWriter
// either.
- handler0 := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ handler0 := GzipHandler(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
}))
handler1 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -588,7 +588,7 @@ var contentTypeTests = []struct {
func TestContentTypes(t *testing.T) {
for _, tt := range contentTypeTests {
- handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", tt.contentType)
_, _ = io.WriteString(w, testBody)
diff --git a/pkg/util/instrumentation/tracer_transport_test.go b/pkg/util/instrumentation/tracer_transport_test.go
index 917ca5371eb..4a25a66b5fb 100644
--- a/pkg/util/instrumentation/tracer_transport_test.go
+++ b/pkg/util/instrumentation/tracer_transport_test.go
@@ -26,7 +26,7 @@ func TestTracerTransportPropagatesTrace(t *testing.T) {
}{
{
name: "no next transport",
- handlerAssert: func(t *testing.T, req *http.Request) {},
+ handlerAssert: func(*testing.T, *http.Request) {},
},
{
name: "with next transport",
@@ -45,7 +45,7 @@ func TestTracerTransportPropagatesTrace(t *testing.T) {
defer closer.Close()
observedTraceID := make(chan string, 2)
- handler := middleware.Tracer{}.Wrap(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ handler := middleware.Tracer{}.Wrap(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
sp := opentracing.SpanFromContext(r.Context())
defer sp.Finish()
diff --git a/pkg/util/log/sampler.go b/pkg/util/log/sampler.go
index e14643f5377..bd338ba686a 100644
--- a/pkg/util/log/sampler.go
+++ b/pkg/util/log/sampler.go
@@ -5,7 +5,6 @@ package log
import (
"context"
"fmt"
- "time"
"go.uber.org/atomic"
)
@@ -15,18 +14,17 @@ type SampledError struct {
sampler *Sampler
}
-func (s SampledError) Error() string {
+func (s SampledError) Error() string { return s.err.Error() }
+func (s SampledError) Unwrap() error { return s.err }
+
+// ShouldLog is called by the common logging module.
+func (s SampledError) ShouldLog(_ context.Context) (bool, string) {
if s.sampler == nil {
- return s.err.Error()
+ return true, ""
}
- return fmt.Sprintf("%s (sampled 1/%d)", s.err.Error(), s.sampler.freq)
-}
-func (s SampledError) Unwrap() error { return s.err }
+ return s.sampler.Sample(), fmt.Sprintf("sampled 1/%d", s.sampler.freq)
-// ShouldLog is called by common logging module.
-func (s SampledError) ShouldLog(_ context.Context, _ time.Duration) bool {
- return s.sampler == nil || s.sampler.Sample()
}
type Sampler struct {
diff --git a/pkg/util/log/sampler_test.go b/pkg/util/log/sampler_test.go
index b7aa8e9fae6..6ed42b6f870 100644
--- a/pkg/util/log/sampler_test.go
+++ b/pkg/util/log/sampler_test.go
@@ -6,8 +6,8 @@ import (
"context"
"fmt"
"testing"
- "time"
+ "github.com/grafana/dskit/middleware"
"github.com/stretchr/testify/require"
)
@@ -30,7 +30,7 @@ func TestSampledError_Error(t *testing.T) {
err := fmt.Errorf(errorWithIDFormat, 1)
sampledErr := SampledError{err: err, sampler: sampler}
- require.EqualError(t, sampledErr, fmt.Sprintf("%s (sampled 1/%d)", err.Error(), errorSampleRate))
+ require.EqualError(t, sampledErr, err.Error())
}
func TestSampledError_ShouldLog(t *testing.T) {
@@ -39,9 +39,26 @@ func TestSampledError_ShouldLog(t *testing.T) {
sampledErr := SampledError{err: err, sampler: sampler}
ctx := context.Background()
- require.True(t, sampledErr.ShouldLog(ctx, time.Duration(0)))
+ shouldLog, reason := sampledErr.ShouldLog(ctx)
+ require.True(t, shouldLog)
+ require.Equal(t, fmt.Sprintf("sampled 1/%d", errorSampleRate), reason)
+
for i := 1; i < errorSampleRate; i++ {
- require.False(t, sampledErr.ShouldLog(ctx, time.Duration(0)))
+ shouldLog, reason = sampledErr.ShouldLog(ctx)
+ require.False(t, shouldLog)
+ require.Equal(t, fmt.Sprintf("sampled 1/%d", errorSampleRate), reason)
}
- require.True(t, sampledErr.ShouldLog(ctx, time.Duration(0)))
+
+ shouldLog, reason = sampledErr.ShouldLog(ctx)
+ require.True(t, shouldLog)
+ require.Equal(t, fmt.Sprintf("sampled 1/%d", errorSampleRate), reason)
+}
+
+func TestSampledError_ShouldImplementOptionalLoggingInterface(t *testing.T) {
+ sampler := NewSampler(errorSampleRate)
+ err := fmt.Errorf(errorWithIDFormat, 1)
+ sampledErr := SampledError{err: err, sampler: sampler}
+
+ var optionalLoggingErr middleware.OptionalLogging
+ require.ErrorAs(t, sampledErr, &optionalLoggingErr)
}
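
For reference, a minimal sketch of how a logging path can honour the new ShouldLog contract via dskit's middleware.OptionalLogging (the logError helper and package name are illustrative; the ShouldLog(ctx) (bool, string) shape is assumed to match the interface exercised by the test above):

    package example

    import (
    	"context"
    	"errors"
    	"fmt"

    	"github.com/grafana/dskit/middleware"
    )

    // logError drops log lines for errors that opt out of logging, as
    // SampledError now does through its ShouldLog method.
    func logError(ctx context.Context, err error) {
    	var optional middleware.OptionalLogging
    	if errors.As(err, &optional) {
    		if ok, reason := optional.ShouldLog(ctx); !ok {
    			fmt.Println("suppressed:", reason) // e.g. "sampled 1/10"
    			return
    		}
    	}
    	fmt.Println("error:", err)
    }
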
diff --git a/pkg/util/noauth/no_auth.go b/pkg/util/noauth/no_auth.go
index 8abcaadd136..dd4145debb0 100644
--- a/pkg/util/noauth/no_auth.go
+++ b/pkg/util/noauth/no_auth.go
@@ -45,7 +45,7 @@ func SetupAuthMiddleware(config *server.Config, multitenancyEnabled bool, noMult
}
config.GRPCMiddleware = append(config.GRPCMiddleware,
- func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
ctx = user.InjectOrgID(ctx, noMultitenancyTenant)
return handler(ctx, req)
},
diff --git a/pkg/util/pool/fast_releasing_pool_test.go b/pkg/util/pool/fast_releasing_pool_test.go
index 3b488517dfd..2f442599f89 100644
--- a/pkg/util/pool/fast_releasing_pool_test.go
+++ b/pkg/util/pool/fast_releasing_pool_test.go
@@ -151,7 +151,7 @@ func TestFastReleasingSlabPool(t *testing.T) {
require.Greater(t, int(delegatePool.Gets.Load()), 0)
})
- t.Run("releasing slabID 0", func(t *testing.T) {
+ t.Run("releasing slabID 0", func(*testing.T) {
delegatePool := &TrackedPool{Parent: &sync.Pool{}}
slabPool := NewFastReleasingSlabPool[byte](delegatePool, 10)
diff --git a/pkg/util/testkafka/cluster.go b/pkg/util/testkafka/cluster.go
index e480c3382bf..bf6ebc1091b 100644
--- a/pkg/util/testkafka/cluster.go
+++ b/pkg/util/testkafka/cluster.go
@@ -13,6 +13,13 @@ import (
// CreateCluster returns a fake Kafka cluster for unit testing.
func CreateCluster(t testing.TB, numPartitions int32, topicName string) (*kfake.Cluster, string) {
+ cluster, addr := CreateClusterWithoutCustomConsumerGroupsSupport(t, numPartitions, topicName)
+ addSupportForConsumerGroups(t, cluster, topicName, numPartitions)
+
+ return cluster, addr
+}
+
+func CreateClusterWithoutCustomConsumerGroupsSupport(t testing.TB, numPartitions int32, topicName string) (*kfake.Cluster, string) {
cluster, err := kfake.NewCluster(kfake.NumBrokers(1), kfake.SeedTopics(numPartitions, topicName))
require.NoError(t, err)
t.Cleanup(cluster.Close)
@@ -20,8 +27,6 @@ func CreateCluster(t testing.TB, numPartitions int32, topicName string) (*kfake.
addrs := cluster.ListenAddrs()
require.Len(t, addrs, 1)
- addSupportForConsumerGroups(t, cluster, topicName, numPartitions)
-
return cluster, addrs[0]
}
@@ -35,6 +40,11 @@ func addSupportForConsumerGroups(t testing.TB, cluster *kfake.Cluster, topicName
return
}
committedOffsets[consumerGroup] = make([]int64, numPartitions+1)
+
+ // Initialise the partition offsets with the special value -1 which means "no offset committed".
+ for i := 0; i < len(committedOffsets[consumerGroup]); i++ {
+ committedOffsets[consumerGroup][i] = -1
+ }
}
cluster.ControlKey(kmsg.OffsetCommit.Int16(), func(request kmsg.Request) (kmsg.Response, error, bool) {
@@ -62,51 +72,64 @@ func addSupportForConsumerGroups(t testing.TB, cluster *kfake.Cluster, topicName
return resp, nil, true
})
- cluster.ControlKey(kmsg.OffsetFetch.Int16(), func(request kmsg.Request) (kmsg.Response, error, bool) {
+ cluster.ControlKey(kmsg.OffsetFetch.Int16(), func(kreq kmsg.Request) (kmsg.Response, error, bool) {
cluster.KeepControl()
- commitR := request.(*kmsg.OffsetFetchRequest)
- assert.Len(t, commitR.Groups, 1, "test only has support for one consumer group per request")
- consumerGroup := commitR.Groups[0].Group
+ req := kreq.(*kmsg.OffsetFetchRequest)
+ assert.Len(t, req.Groups, 1, "test only has support for one consumer group per request")
+ consumerGroup := req.Groups[0].Group
ensureConsumerGroupExists(consumerGroup)
const allPartitions = -1
var partitionID int32
- if len(commitR.Groups[0].Topics) == 0 {
+ if len(req.Groups[0].Topics) == 0 {
// An empty request means fetch all topic-partitions for this group.
partitionID = allPartitions
} else {
- partitionID = commitR.Groups[0].Topics[0].Partitions[0]
- assert.Len(t, commitR.Groups[0], 1, "test only has support for one partition per request")
- assert.Len(t, commitR.Groups[0].Topics[0].Partitions, 1, "test only has support for one partition per request")
+ partitionID = req.Groups[0].Topics[0].Partitions[0]
+ assert.Len(t, req.Groups[0], 1, "test only has support for one partition per request")
+ assert.Len(t, req.Groups[0].Topics[0].Partitions, 1, "test only has support for one partition per request")
}
+ // Prepare the list of partitions for which the offset has been committed.
+ // This mimics the real Kafka behaviour.
var partitionsResp []kmsg.OffsetFetchResponseGroupTopicPartition
if partitionID == allPartitions {
for i := int32(1); i < numPartitions+1; i++ {
+ if committedOffsets[consumerGroup][i] >= 0 {
+ partitionsResp = append(partitionsResp, kmsg.OffsetFetchResponseGroupTopicPartition{
+ Partition: i,
+ Offset: committedOffsets[consumerGroup][i],
+ })
+ }
+ }
+ } else {
+ if committedOffsets[consumerGroup][partitionID] >= 0 {
partitionsResp = append(partitionsResp, kmsg.OffsetFetchResponseGroupTopicPartition{
- Partition: i,
- Offset: committedOffsets[consumerGroup][i],
+ Partition: partitionID,
+ Offset: committedOffsets[consumerGroup][partitionID],
})
}
- } else {
- partitionsResp = append(partitionsResp, kmsg.OffsetFetchResponseGroupTopicPartition{
- Partition: partitionID,
- Offset: committedOffsets[consumerGroup][partitionID],
- })
}
- resp := request.ResponseKind().(*kmsg.OffsetFetchResponse)
+ // Prepare the list of topics for which there are some committed offsets.
+ // This mimics the real Kafka behaviour.
+ var topicsResp []kmsg.OffsetFetchResponseGroupTopic
+ if len(partitionsResp) > 0 {
+ topicsResp = []kmsg.OffsetFetchResponseGroupTopic{
+ {
+ Topic: topicName,
+ Partitions: partitionsResp,
+ },
+ }
+ }
+
+ resp := kreq.ResponseKind().(*kmsg.OffsetFetchResponse)
resp.Default()
resp.Groups = []kmsg.OffsetFetchResponseGroup{
{
- Group: consumerGroup,
- Topics: []kmsg.OffsetFetchResponseGroupTopic{
- {
- Topic: topicName,
- Partitions: partitionsResp,
- },
- },
+ Group: consumerGroup,
+ Topics: topicsResp,
},
}
return resp, nil, true
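
For reference, a minimal sketch of how a test can pick between the two constructors exposed above (topic name and partition count are illustrative; only the signatures from this hunk are assumed):

    package testkafka_example

    import (
    	"testing"

    	"github.com/grafana/mimir/pkg/util/testkafka"
    )

    func TestWithFakeKafka(t *testing.T) {
    	// Fake cluster with the custom consumer-group handlers installed:
    	// OffsetFetch only returns partitions whose committed offset is >= 0.
    	cluster, addr := testkafka.CreateCluster(t, 3, "ingest")

    	// Or, when a test does not need the custom consumer-group handlers:
    	// cluster, addr := testkafka.CreateClusterWithoutCustomConsumerGroupsSupport(t, 3, "ingest")

    	_ = cluster // *kfake.Cluster, already registered for cleanup via t.Cleanup
    	_ = addr    // broker address to point the client under test at
    }
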
diff --git a/pkg/util/time.go b/pkg/util/time.go
index 51d912c2a4d..d897494b186 100644
--- a/pkg/util/time.go
+++ b/pkg/util/time.go
@@ -11,6 +11,7 @@ import (
"math/rand"
"net/http"
"strconv"
+ "sync"
"time"
"github.com/grafana/dskit/httpgrpc"
@@ -113,6 +114,60 @@ func NewDisableableTicker(interval time.Duration) (func(), <-chan time.Time) {
return func() { tick.Stop() }, tick.C
}
+// NewVariableTicker wraps time.Ticker to Reset() the ticker with the next duration (picked from
+// input durations) after each tick. The last configured duration is the one that will be preserved
+// once previous ones have been applied.
+//
+// Returns a function for stopping the ticker, and the ticker channel.
+func NewVariableTicker(durations ...time.Duration) (func(), <-chan time.Time) {
+ if len(durations) == 0 {
+ panic("at least 1 duration required")
+ }
+
+ // Init the ticker with the 1st duration.
+ ticker := time.NewTicker(durations[0])
+ durations = durations[1:]
+
+ // If there was only 1 duration we can simply return the built-in ticker.
+ if len(durations) == 0 {
+ return ticker.Stop, ticker.C
+ }
+
+ // Create a channel over which our ticks will be sent.
+ ticks := make(chan time.Time, 1)
+
+ // Create a channel used to signal once this ticker is stopped.
+ stopped := make(chan struct{})
+
+ go func() {
+ for {
+ select {
+ case ts := <-ticker.C:
+ if len(durations) > 0 {
+ ticker.Reset(durations[0])
+ durations = durations[1:]
+ }
+
+ ticks <- ts
+
+ case <-stopped:
+ // Interrupt the loop once stopped.
+ return
+ }
+ }
+ }()
+
+ stopOnce := sync.Once{}
+ stop := func() {
+ stopOnce.Do(func() {
+ ticker.Stop()
+ close(stopped)
+ })
+ }
+
+ return stop, ticks
+}
+
// UnixSeconds is Unix timestamp with seconds precision.
type UnixSeconds int64
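
For reference, a minimal usage sketch of the new NewVariableTicker (the durations, loop, and main wrapper are illustrative only; the helper's name and its stop/channel contract are taken from the hunk above):

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/grafana/mimir/pkg/util"
    )

    func main() {
    	// First tick fires ~1s after start, every following tick ~5s apart.
    	stop, ticks := util.NewVariableTicker(1*time.Second, 5*time.Second)
    	defer stop() // idempotent; the tick channel is never closed

    	for i := 0; i < 3; i++ {
    		fmt.Println("tick at", <-ticks)
    	}
    }
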
diff --git a/pkg/util/time_test.go b/pkg/util/time_test.go
index 38e3d42c6a3..60a5d663809 100644
--- a/pkg/util/time_test.go
+++ b/pkg/util/time_test.go
@@ -14,6 +14,8 @@ import (
v1 "github.com/prometheus/prometheus/web/api/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ "github.com/grafana/mimir/pkg/util/test"
)
const (
@@ -204,3 +206,73 @@ func TestUnixSecondsJSON(t *testing.T) {
})
}
}
+
+func TestVariableTicker(t *testing.T) {
+ test.VerifyNoLeak(t)
+
+ t.Run("should tick at configured durations", func(t *testing.T) {
+ t.Parallel()
+
+ startTime := time.Now()
+ stop, tickerChan := NewVariableTicker(time.Second, 2*time.Second)
+ t.Cleanup(stop)
+
+ // Capture the timing of 3 ticks.
+ var ticks []time.Time
+ for len(ticks) < 3 {
+ ticks = append(ticks, <-tickerChan)
+ }
+
+ tolerance := 250 * time.Millisecond
+ assert.InDelta(t, ticks[0].Sub(startTime).Seconds(), 1*time.Second.Seconds(), float64(tolerance))
+ assert.InDelta(t, ticks[1].Sub(startTime).Seconds(), 3*time.Second.Seconds(), float64(tolerance))
+ assert.InDelta(t, ticks[2].Sub(startTime).Seconds(), 5*time.Second.Seconds(), float64(tolerance))
+ })
+
+ t.Run("should not close the channel on stop function called", func(t *testing.T) {
+ t.Parallel()
+
+ for _, durations := range [][]time.Duration{{time.Second}, {time.Second, 2 * time.Second}} {
+ durations := durations
+
+ t.Run(fmt.Sprintf("durations: %v", durations), func(t *testing.T) {
+ t.Parallel()
+
+ stop, tickerChan := NewVariableTicker(durations...)
+ stop()
+
+ select {
+ case <-tickerChan:
+ t.Error("should not close the channel and not send any further tick")
+ case <-time.After(2 * time.Second):
+ // All good.
+ }
+ })
+ }
+ })
+
+ t.Run("stop function should be idempotent", func(t *testing.T) {
+ t.Parallel()
+
+ for _, durations := range [][]time.Duration{{time.Second}, {time.Second, 2 * time.Second}} {
+ durations := durations
+
+ t.Run(fmt.Sprintf("durations: %v", durations), func(t *testing.T) {
+ t.Parallel()
+
+ stop, tickerChan := NewVariableTicker(durations...)
+
+ // Call stop() twice.
+ stop()
+ stop()
+
+ select {
+ case <-tickerChan:
+ t.Error("should not close the channel and not send any further tick")
+ case <-time.After(2 * time.Second):
+ // All good.
+ }
+ })
+ }
+ })
+}
diff --git a/pkg/util/usage/usage.go b/pkg/util/usage/usage.go
index 4eec74b1718..8a370d6efdd 100644
--- a/pkg/util/usage/usage.go
+++ b/pkg/util/usage/usage.go
@@ -12,7 +12,7 @@ import (
"github.com/grafana/dskit/flagext"
"github.com/grafana/mimir/pkg/ingester/activeseries"
- "github.com/grafana/mimir/pkg/util/fieldcategory"
+ "github.com/grafana/mimir/pkg/util/configdoc"
)
// Usage prints command-line usage.
@@ -30,7 +30,7 @@ func Usage(printAll bool, configs ...interface{}) error {
fmt.Fprintf(fs.Output(), "Usage of %s:\n", os.Args[0])
fs.VisitAll(func(fl *flag.Flag) {
v := reflect.ValueOf(fl.Value)
- fieldCat := fieldcategory.Basic
+ fieldCat := configdoc.Basic
var field reflect.StructField
var hasField bool
@@ -41,30 +41,30 @@ func Usage(printAll bool, configs ...interface{}) error {
if v.Kind() == reflect.Ptr {
ptr := v.Pointer()
field, hasField = fields[ptr]
- if hasField && isFieldHidden(field) {
+ if hasField && isFieldHidden(field, fl.Name) {
// Don't print help for this flag since it's hidden
return
}
}
- if override, ok := fieldcategory.GetOverride(fl.Name); ok {
+ if override, ok := configdoc.GetCategoryOverride(fl.Name); ok {
fieldCat = override
} else if hasField {
catStr := field.Tag.Get("category")
switch catStr {
case "advanced":
- fieldCat = fieldcategory.Advanced
+ fieldCat = configdoc.Advanced
case "experimental":
- fieldCat = fieldcategory.Experimental
+ fieldCat = configdoc.Experimental
case "deprecated":
- fieldCat = fieldcategory.Deprecated
+ fieldCat = configdoc.Deprecated
}
} else {
// The field is neither an override nor has been parsed, so we'll skip it.
return
}
- if fieldCat != fieldcategory.Basic && !printAll {
+ if fieldCat != configdoc.Basic && !printAll {
// Don't print help for this flag since we're supposed to print only basic flags
return
}
@@ -81,9 +81,9 @@ func Usage(printAll bool, configs ...interface{}) error {
// for both 4- and 8-space tab stops.
b.WriteString("\n \t")
switch fieldCat {
- case fieldcategory.Experimental:
+ case configdoc.Experimental:
b.WriteString("[experimental] ")
- case fieldcategory.Deprecated:
+ case configdoc.Deprecated:
b.WriteString("[deprecated] ")
}
b.WriteString(strings.ReplaceAll(fl.Usage, "\n", "\n \t"))
@@ -152,7 +152,7 @@ func parseStructure(structure interface{}, fields map[uintptr]reflect.StructFiel
fields[fieldValue.Addr().Pointer()] = field
// Recurse if a struct
- if field.Type.Kind() != reflect.Struct || isFieldHidden(field) || ignoreStructType(field.Type) || !field.IsExported() {
+ if field.Type.Kind() != reflect.Struct || isFieldHidden(field, "") || ignoreStructType(field.Type) || !field.IsExported() {
continue
}
@@ -231,7 +231,11 @@ func getFlagName(fl *flag.Flag) string {
return "value"
}
-func isFieldHidden(f reflect.StructField) bool {
+func isFieldHidden(f reflect.StructField, name string) bool {
+ if hidden, ok := configdoc.GetHiddenOverride(name); ok {
+ return hidden
+ }
+
return getDocTagFlag(f, "hidden")
}
diff --git a/pkg/util/validation/exporter/exporter_test.go b/pkg/util/validation/exporter/exporter_test.go
index c819df6f2d6..cbb79835638 100644
--- a/pkg/util/validation/exporter/exporter_test.go
+++ b/pkg/util/validation/exporter/exporter_test.go
@@ -284,7 +284,7 @@ func TestOverridesExporter_withRing(t *testing.T) {
// Create an empty ring.
ctx := context.Background()
- require.NoError(t, ringStore.CAS(ctx, ringKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ require.NoError(t, ringStore.CAS(ctx, ringKey, func(interface{}) (out interface{}, retry bool, err error) {
return ring.NewDesc(), true, nil
}))
diff --git a/pkg/util/validation/exporter/ring_test.go b/pkg/util/validation/exporter/ring_test.go
index a1b96b6a752..efd7419dfa7 100644
--- a/pkg/util/validation/exporter/ring_test.go
+++ b/pkg/util/validation/exporter/ring_test.go
@@ -22,7 +22,7 @@ func TestOverridesExporter_emptyRing(t *testing.T) {
// Create an empty ring.
ctx := context.Background()
- require.NoError(t, ringStore.CAS(ctx, ringKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ require.NoError(t, ringStore.CAS(ctx, ringKey, func(interface{}) (out interface{}, retry bool, err error) {
return ring.NewDesc(), true, nil
}))
@@ -66,7 +66,7 @@ func TestOverridesExporterRing_scaleDown(t *testing.T) {
// Register instances in the ring (manually, to be able to assign tokens).
ctx := context.Background()
- require.NoError(t, ringStore.CAS(ctx, ringKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ require.NoError(t, ringStore.CAS(ctx, ringKey, func(interface{}) (out interface{}, retry bool, err error) {
desc := ring.NewDesc()
desc.AddIngester(l1.GetInstanceID(), l1.GetInstanceAddr(), "", []uint32{leaderToken + 1}, ring.ACTIVE, time.Now())
desc.AddIngester(l2.GetInstanceID(), l2.GetInstanceAddr(), "", []uint32{leaderToken + 2}, ring.ACTIVE, time.Now())
diff --git a/pkg/util/version/info_handler.go b/pkg/util/version/info_handler.go
index 3d6038f57f3..ee9008b906e 100644
--- a/pkg/util/version/info_handler.go
+++ b/pkg/util/version/info_handler.go
@@ -30,8 +30,7 @@ type BuildInfoFeatures struct {
}
func BuildInfoHandler(application string, features interface{}) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-
+ return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
response := BuildInfoResponse{
Status: "success",
BuildInfo: BuildInfo{
diff --git a/renovate.json b/renovate.json
index 49ca47a6f8d..6a25bcb1d52 100644
--- a/renovate.json
+++ b/renovate.json
@@ -11,7 +11,7 @@
"schedule": ["before 9am on Monday"],
"packageRules": [
{
- "matchBaseBranches": ["release-2.12","release-2.11","release-2.10"],
+ "matchBaseBranches": ["release-2.12","release-2.11"],
"packagePatterns": ["*"],
"enabled": false
},
diff --git a/tools/compaction-planner/main.go b/tools/compaction-planner/main.go
index 0e315f40346..becc240869e 100644
--- a/tools/compaction-planner/main.go
+++ b/tools/compaction-planner/main.go
@@ -16,7 +16,6 @@ import (
gokitlog "github.com/go-kit/log"
"github.com/grafana/dskit/flagext"
- "github.com/oklog/ulid"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/model/timestamp"
@@ -76,24 +75,7 @@ func main() {
log.Println("Using index from", time.Unix(idx.UpdatedAt, 0).UTC().Format(time.RFC3339))
- // convert index to metas.
- deleted := map[ulid.ULID]bool{}
- for _, id := range idx.BlockDeletionMarks.GetULIDs() {
- deleted[id] = true
- }
-
- metas := map[ulid.ULID]*block.Meta{}
- for _, b := range idx.Blocks {
- if deleted[b.ID] {
- continue
- }
- metas[b.ID] = b.ThanosMeta()
- if metas[b.ID].Thanos.Labels == nil {
- metas[b.ID].Thanos.Labels = map[string]string{}
- }
- metas[b.ID].Thanos.Labels[mimir_tsdb.CompactorShardIDExternalLabel] = b.CompactorShardID // Needed for correct planning.
- }
-
+ metas := compactor.ConvertBucketIndexToMetasForCompactionJobPlanning(idx)
synced := extprom.NewTxGaugeVec(nil, prometheus.GaugeOpts{Name: "synced", Help: "Number of block metadata synced"},
[]string{"state"}, []string{block.MarkedForNoCompactionMeta},
)
diff --git a/tools/doc-generator/parse/parser.go b/tools/doc-generator/parse/parser.go
index 9a97c45bc27..c1c78cb5fe1 100644
--- a/tools/doc-generator/parse/parser.go
+++ b/tools/doc-generator/parse/parser.go
@@ -21,10 +21,11 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/relabel"
+ "github.com/thanos-io/objstore/providers/s3"
"github.com/grafana/mimir/pkg/ingester/activeseries"
"github.com/grafana/mimir/pkg/storage/tsdb"
- "github.com/grafana/mimir/pkg/util/fieldcategory"
+ "github.com/grafana/mimir/pkg/util/configdoc"
"github.com/grafana/mimir/pkg/util/validation"
)
@@ -154,7 +155,7 @@ func config(block *ConfigBlock, cfg interface{}, flags map[uintptr]*flag.Flag, r
fieldValue := v.FieldByIndex(field.Index)
// Skip fields explicitly marked as "hidden" in the doc
- if isFieldHidden(field) {
+ if isFieldHidden(field, "") {
continue
}
@@ -292,6 +293,12 @@ func config(block *ConfigBlock, cfg interface{}, flags map[uintptr]*flag.Flag, r
continue
}
+ // The config field has a CLI flag registered. We should check again if the field is hidden,
+ // to ensure any CLI flag override is honored too.
+ if isFieldHidden(field, fieldFlag.Name) {
+ continue
+ }
+
block.Add(&ConfigEntry{
Kind: kind,
Name: fieldName,
@@ -583,12 +590,29 @@ func getCustomFieldEntry(cfg interface{}, field reflect.StructField, fieldValue
FieldCategory: getFieldCategory(field, fieldFlag.Name),
}, nil
}
+ if field.Type == reflect.TypeOf(s3.BucketLookupType(0)) {
+ fieldFlag, err := getFieldFlag(field, fieldValue, flags)
+ if err != nil || fieldFlag == nil {
+ return nil, err
+ }
+
+ return &ConfigEntry{
+ Kind: KindField,
+ Name: getFieldName(field),
+ Required: isFieldRequired(field),
+ FieldFlag: fieldFlag.Name,
+ FieldDesc: getFieldDescription(cfg, field, fieldFlag.Usage),
+ FieldType: "string",
+ FieldDefault: getFieldDefault(field, fieldFlag.DefValue),
+ FieldCategory: getFieldCategory(field, fieldFlag.Name),
+ }, nil
+ }
return nil, nil
}
func getFieldCategory(field reflect.StructField, name string) string {
- if category, ok := fieldcategory.GetOverride(name); ok {
+ if category, ok := configdoc.GetCategoryOverride(name); ok {
return category.String()
}
return field.Tag.Get("category")
@@ -602,7 +626,10 @@ func getFieldDefault(field reflect.StructField, fallback string) string {
return fallback
}
-func isFieldHidden(f reflect.StructField) bool {
+func isFieldHidden(f reflect.StructField, name string) bool {
+ if hidden, ok := configdoc.GetHiddenOverride(name); ok {
+ return hidden
+ }
return getDocTagFlag(f, "hidden")
}
diff --git a/tools/querytee/proxy.go b/tools/querytee/proxy.go
index 3247345f9ab..d0a3693a4ab 100644
--- a/tools/querytee/proxy.go
+++ b/tools/querytee/proxy.go
@@ -73,7 +73,7 @@ type Route struct {
type Proxy struct {
cfg ProxyConfig
- backends []*ProxyBackend
+ backends []ProxyBackendInterface
logger log.Logger
registerer prometheus.Registerer
metrics *ProxyMetrics
@@ -141,7 +141,7 @@ func NewProxy(cfg ProxyConfig, logger log.Logger, routes []Route, registerer pro
if cfg.PreferredBackend != "" {
exists := false
for _, b := range p.backends {
- if b.preferred {
+ if b.Preferred() {
exists = true
break
}
@@ -216,8 +216,8 @@ func (p *Proxy) Start() error {
if p.cfg.PassThroughNonRegisteredRoutes {
for _, backend := range p.backends {
- if backend.preferred {
- router.PathPrefix("/").Handler(httputil.NewSingleHostReverseProxy(backend.endpoint))
+ if backend.Preferred() {
+ router.PathPrefix("/").Handler(httputil.NewSingleHostReverseProxy(backend.Endpoint()))
break
}
}
diff --git a/tools/querytee/proxy_backend.go b/tools/querytee/proxy_backend.go
index 0d5e87184f1..6459e81fe27 100644
--- a/tools/querytee/proxy_backend.go
+++ b/tools/querytee/proxy_backend.go
@@ -18,6 +18,13 @@ import (
"github.com/pkg/errors"
)
+type ProxyBackendInterface interface {
+ Name() string
+ Endpoint() *url.URL
+ Preferred() bool
+ ForwardRequest(orig *http.Request, body io.ReadCloser) (time.Duration, int, []byte, *http.Response, error)
+}
+
// ProxyBackend holds the information of a single backend.
type ProxyBackend struct {
name string
@@ -31,7 +38,7 @@ type ProxyBackend struct {
}
// NewProxyBackend makes a new ProxyBackend
-func NewProxyBackend(name string, endpoint *url.URL, timeout time.Duration, preferred bool, skipTLSVerify bool) *ProxyBackend {
+func NewProxyBackend(name string, endpoint *url.URL, timeout time.Duration, preferred bool, skipTLSVerify bool) ProxyBackendInterface {
return &ProxyBackend{
name: name,
endpoint: endpoint,
@@ -59,13 +66,29 @@ func NewProxyBackend(name string, endpoint *url.URL, timeout time.Duration, pref
}
}
-func (b *ProxyBackend) ForwardRequest(orig *http.Request, body io.ReadCloser) (int, []byte, *http.Response, error) {
+func (b *ProxyBackend) Name() string {
+ return b.name
+}
+
+func (b *ProxyBackend) Endpoint() *url.URL {
+ return b.endpoint
+}
+
+func (b *ProxyBackend) Preferred() bool {
+ return b.preferred
+}
+
+func (b *ProxyBackend) ForwardRequest(orig *http.Request, body io.ReadCloser) (time.Duration, int, []byte, *http.Response, error) {
req, err := b.createBackendRequest(orig, body)
if err != nil {
- return 0, nil, nil, err
+ return 0, 0, nil, nil, err
}
- return b.doBackendRequest(req)
+ start := time.Now()
+ status, responseBody, resp, err := b.doBackendRequest(req)
+ elapsed := time.Since(start)
+
+ return elapsed, status, responseBody, resp, err
}
func (b *ProxyBackend) createBackendRequest(orig *http.Request, body io.ReadCloser) (*http.Request, error) {
diff --git a/tools/querytee/proxy_backend_test.go b/tools/querytee/proxy_backend_test.go
index 68ed48e48f2..2d66ec80936 100644
--- a/tools/querytee/proxy_backend_test.go
+++ b/tools/querytee/proxy_backend_test.go
@@ -84,7 +84,11 @@ func Test_ProxyBackend_createBackendRequest_HTTPBasicAuthentication(t *testing.T
}
b := NewProxyBackend("test", u, time.Second, false, false)
- r, err := b.createBackendRequest(orig, nil)
+ bp, ok := b.(*ProxyBackend)
+ if !ok {
+ t.Fatalf("Type assertion to *ProxyBackend failed")
+ }
+ r, err := bp.createBackendRequest(orig, nil)
require.NoError(t, err)
actualUser, actualPass, _ := r.BasicAuth()
diff --git a/tools/querytee/proxy_endpoint.go b/tools/querytee/proxy_endpoint.go
index 6bc54df452a..e81a893b99f 100644
--- a/tools/querytee/proxy_endpoint.go
+++ b/tools/querytee/proxy_endpoint.go
@@ -23,7 +23,7 @@ type ResponsesComparator interface {
}
type ProxyEndpoint struct {
- backends []*ProxyBackend
+ backends []ProxyBackendInterface
metrics *ProxyMetrics
logger log.Logger
comparator ResponsesComparator
@@ -36,10 +36,10 @@ type ProxyEndpoint struct {
routeName string
}
-func NewProxyEndpoint(backends []*ProxyBackend, routeName string, metrics *ProxyMetrics, logger log.Logger, comparator ResponsesComparator, slowResponseThreshold time.Duration) *ProxyEndpoint {
+func NewProxyEndpoint(backends []ProxyBackendInterface, routeName string, metrics *ProxyMetrics, logger log.Logger, comparator ResponsesComparator, slowResponseThreshold time.Duration) *ProxyEndpoint {
hasPreferredBackend := false
for _, backend := range backends {
- if backend.preferred {
+ if backend.Preferred() {
hasPreferredBackend = true
break
}
@@ -74,7 +74,7 @@ func (p *ProxyEndpoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
- p.metrics.responsesTotal.WithLabelValues(downstreamRes.backend.name, r.Method, p.routeName).Inc()
+ p.metrics.responsesTotal.WithLabelValues(downstreamRes.backend.Name(), r.Method, p.routeName).Inc()
}
func (p *ProxyEndpoint) executeBackendRequests(req *http.Request, resCh chan *backendResponse) {
@@ -110,9 +110,9 @@ func (p *ProxyEndpoint) executeBackendRequests(req *http.Request, resCh chan *ba
// Keep track of the fastest and slowest backends
var (
fastestDuration time.Duration
- fastestBackend *ProxyBackend
+ fastestBackend ProxyBackendInterface
slowestDuration time.Duration
- slowestBackend *ProxyBackend
+ slowestBackend ProxyBackendInterface
)
wg.Add(len(p.backends))
@@ -121,16 +121,12 @@ func (p *ProxyEndpoint) executeBackendRequests(req *http.Request, resCh chan *ba
go func() {
defer wg.Done()
- var (
- bodyReader io.ReadCloser
- start = time.Now()
- )
+ var bodyReader io.ReadCloser
if len(body) > 0 {
bodyReader = io.NopCloser(bytes.NewReader(body))
}
- status, body, resp, err := b.ForwardRequest(req, bodyReader)
- elapsed := time.Since(start)
+ elapsed, status, body, resp, err := b.ForwardRequest(req, bodyReader)
contentType := ""
if p.slowResponseThreshold > 0 {
@@ -165,8 +161,8 @@ func (p *ProxyEndpoint) executeBackendRequests(req *http.Request, resCh chan *ba
lvl = level.Warn
}
- lvl(p.logger).Log("msg", "Backend response", "path", req.URL.Path, "query", query, "backend", b.name, "status", status, "elapsed", elapsed)
- p.metrics.requestDuration.WithLabelValues(res.backend.name, req.Method, p.routeName, strconv.Itoa(res.statusCode())).Observe(elapsed.Seconds())
+ lvl(p.logger).Log("msg", "Backend response", "path", req.URL.Path, "query", query, "backend", b.Name(), "status", status, "elapsed", elapsed)
+ p.metrics.requestDuration.WithLabelValues(res.backend.Name(), req.Method, p.routeName, strconv.Itoa(res.statusCode())).Observe(elapsed.Seconds())
// Keep track of the response if required.
if p.comparator != nil {
@@ -187,7 +183,7 @@ func (p *ProxyEndpoint) executeBackendRequests(req *http.Request, resCh chan *ba
if p.comparator != nil {
expectedResponse := responses[0]
actualResponse := responses[1]
- if responses[1].backend.preferred {
+ if responses[1].backend.Preferred() {
expectedResponse, actualResponse = actualResponse, expectedResponse
}
@@ -222,9 +218,9 @@ func (p *ProxyEndpoint) executeBackendRequests(req *http.Request, resCh chan *ba
"query", query,
"user", req.Header.Get("X-Scope-OrgID"),
"slowest_duration", slowestDuration,
- "slowest_backend", slowestBackend.name,
+ "slowest_backend", slowestBackend.Name(),
"fastest_duration", fastestDuration,
- "fastest_backend", fastestBackend.name,
+ "fastest_backend", fastestBackend.Name(),
)
}
@@ -243,13 +239,13 @@ func (p *ProxyEndpoint) waitBackendResponseForDownstream(resCh chan *backendResp
// - There's no preferred backend configured
// - Or this response is from the preferred backend
// - Or the preferred backend response has already been received and wasn't successful
- if res.succeeded() && (!p.hasPreferredBackend || res.backend.preferred || preferredResponseReceived) {
+ if res.succeeded() && (!p.hasPreferredBackend || res.backend.Preferred() || preferredResponseReceived) {
return res
}
// If we received a non-successful response from the preferred backend, then we can
// return the first successful response received so far (if any).
- if res.backend.preferred && !res.succeeded() {
+ if res.backend.Preferred() && !res.succeeded() {
preferredResponseReceived = true
for _, prevRes := range responses {
@@ -292,7 +288,7 @@ func (p *ProxyEndpoint) compareResponses(expectedResponse, actualResponse *backe
}
type backendResponse struct {
- backend *ProxyBackend
+ backend ProxyBackendInterface
status int
contentType string
body []byte
diff --git a/tools/querytee/proxy_endpoint_test.go b/tools/querytee/proxy_endpoint_test.go
index 97bf3f0b36e..95b5ec0a579 100644
--- a/tools/querytee/proxy_endpoint_test.go
+++ b/tools/querytee/proxy_endpoint_test.go
@@ -39,19 +39,19 @@ func Test_ProxyEndpoint_waitBackendResponseForDownstream(t *testing.T) {
backendOther2 := NewProxyBackend("backend-3", backendURL3, time.Second, false, false)
tests := map[string]struct {
- backends []*ProxyBackend
+ backends []ProxyBackendInterface
responses []*backendResponse
- expected *ProxyBackend
+ expected ProxyBackendInterface
}{
"the preferred backend is the 1st response received": {
- backends: []*ProxyBackend{backendPref, backendOther1},
+ backends: []ProxyBackendInterface{backendPref, backendOther1},
responses: []*backendResponse{
{backend: backendPref, status: 200},
},
expected: backendPref,
},
"the preferred backend is the last response received": {
- backends: []*ProxyBackend{backendPref, backendOther1},
+ backends: []ProxyBackendInterface{backendPref, backendOther1},
responses: []*backendResponse{
{backend: backendOther1, status: 200},
{backend: backendPref, status: 200},
@@ -59,7 +59,7 @@ func Test_ProxyEndpoint_waitBackendResponseForDownstream(t *testing.T) {
expected: backendPref,
},
"the preferred backend is the last response received but it's not successful": {
- backends: []*ProxyBackend{backendPref, backendOther1},
+ backends: []ProxyBackendInterface{backendPref, backendOther1},
responses: []*backendResponse{
{backend: backendOther1, status: 200},
{backend: backendPref, status: 500},
@@ -67,7 +67,7 @@ func Test_ProxyEndpoint_waitBackendResponseForDownstream(t *testing.T) {
expected: backendOther1,
},
"the preferred backend is the 2nd response received but only the last one is successful": {
- backends: []*ProxyBackend{backendPref, backendOther1, backendOther2},
+ backends: []ProxyBackendInterface{backendPref, backendOther1, backendOther2},
responses: []*backendResponse{
{backend: backendOther1, status: 500},
{backend: backendPref, status: 500},
@@ -76,14 +76,14 @@ func Test_ProxyEndpoint_waitBackendResponseForDownstream(t *testing.T) {
expected: backendOther2,
},
"there's no preferred backend configured and the 1st response is successful": {
- backends: []*ProxyBackend{backendOther1, backendOther2},
+ backends: []ProxyBackendInterface{backendOther1, backendOther2},
responses: []*backendResponse{
{backend: backendOther1, status: 200},
},
expected: backendOther1,
},
"there's no preferred backend configured and the last response is successful": {
- backends: []*ProxyBackend{backendOther1, backendOther2},
+ backends: []ProxyBackendInterface{backendOther1, backendOther2},
responses: []*backendResponse{
{backend: backendOther1, status: 500},
{backend: backendOther2, status: 200},
@@ -91,7 +91,7 @@ func Test_ProxyEndpoint_waitBackendResponseForDownstream(t *testing.T) {
expected: backendOther2,
},
"no received response is successful": {
- backends: []*ProxyBackend{backendPref, backendOther1},
+ backends: []ProxyBackendInterface{backendPref, backendOther1},
responses: []*backendResponse{
{backend: backendOther1, status: 500},
{backend: backendPref, status: 500},
@@ -144,7 +144,7 @@ func Test_ProxyEndpoint_Requests(t *testing.T) {
backendURL2, err := url.Parse(backend2.URL)
require.NoError(t, err)
- backends := []*ProxyBackend{
+ backends := []ProxyBackendInterface{
NewProxyBackend("backend-1", backendURL1, time.Second, true, false),
NewProxyBackend("backend-2", backendURL2, time.Second, false, false),
}
@@ -212,7 +212,7 @@ func Test_ProxyEndpoint_Requests(t *testing.T) {
wg.Add(2)
if tc.handler == nil {
- testHandler = func(w http.ResponseWriter, r *http.Request) {
+ testHandler = func(w http.ResponseWriter, _ *http.Request) {
_, _ = w.Write([]byte("ok"))
}
@@ -288,7 +288,7 @@ func Test_ProxyEndpoint_Comparison(t *testing.T) {
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
- preferredBackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ preferredBackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", scenario.preferredResponseContentType)
w.WriteHeader(scenario.preferredResponseStatusCode)
_, err := w.Write([]byte("preferred response"))
@@ -299,7 +299,7 @@ func Test_ProxyEndpoint_Comparison(t *testing.T) {
preferredBackendURL, err := url.Parse(preferredBackend.URL)
require.NoError(t, err)
- secondaryBackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ secondaryBackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", scenario.secondaryResponseContentType)
w.WriteHeader(scenario.secondaryResponseStatusCode)
_, err := w.Write([]byte("secondary response"))
@@ -310,7 +310,7 @@ func Test_ProxyEndpoint_Comparison(t *testing.T) {
secondaryBackendURL, err := url.Parse(secondaryBackend.URL)
require.NoError(t, err)
- backends := []*ProxyBackend{
+ backends := []ProxyBackendInterface{
NewProxyBackend("preferred-backend", preferredBackendURL, time.Second, true, false),
NewProxyBackend("secondary-backend", secondaryBackendURL, time.Second, false, false),
}
@@ -361,77 +361,59 @@ func Test_ProxyEndpoint_LogSlowQueries(t *testing.T) {
}{
"responses are below threshold": {
slowResponseThreshold: 100 * time.Millisecond,
- preferredResponseLatency: 0 * time.Millisecond,
- secondaryResponseLatency: 0 * time.Millisecond,
+ preferredResponseLatency: 1 * time.Millisecond,
+ secondaryResponseLatency: 1 * time.Millisecond,
expectLatencyExceedsThreshold: false,
},
"one response above threshold": {
- slowResponseThreshold: 100 * time.Millisecond,
- preferredResponseLatency: 0 * time.Millisecond,
- secondaryResponseLatency: 101 * time.Millisecond,
+ slowResponseThreshold: 50 * time.Millisecond,
+ preferredResponseLatency: 1 * time.Millisecond,
+ secondaryResponseLatency: 70 * time.Millisecond,
expectLatencyExceedsThreshold: true,
fastestBackend: "preferred-backend",
slowestBackend: "secondary-backend",
},
"responses are both above threshold, but lower than threshold between themselves": {
- slowResponseThreshold: 100 * time.Millisecond,
- preferredResponseLatency: 101 * time.Millisecond,
- secondaryResponseLatency: 150 * time.Millisecond,
+ slowResponseThreshold: 50 * time.Millisecond,
+ preferredResponseLatency: 51 * time.Millisecond,
+ secondaryResponseLatency: 62 * time.Millisecond,
expectLatencyExceedsThreshold: false,
},
"responses are both above threshold, and above threshold between themselves": {
- slowResponseThreshold: 100 * time.Millisecond,
- preferredResponseLatency: 101 * time.Millisecond,
- secondaryResponseLatency: 202 * time.Millisecond,
+ slowResponseThreshold: 10 * time.Millisecond,
+ preferredResponseLatency: 11 * time.Millisecond,
+ secondaryResponseLatency: 52 * time.Millisecond,
expectLatencyExceedsThreshold: true,
fastestBackend: "preferred-backend",
slowestBackend: "secondary-backend",
},
"secondary latency is faster than primary, and difference is below threshold": {
- slowResponseThreshold: 100 * time.Millisecond,
- preferredResponseLatency: 50 * time.Millisecond,
- secondaryResponseLatency: 0 * time.Millisecond,
+ slowResponseThreshold: 50 * time.Millisecond,
+ preferredResponseLatency: 10 * time.Millisecond,
+ secondaryResponseLatency: 1 * time.Millisecond,
expectLatencyExceedsThreshold: false,
},
"secondary latency is faster than primary, and difference is above threshold": {
- slowResponseThreshold: 100 * time.Millisecond,
- preferredResponseLatency: 101 * time.Millisecond,
- secondaryResponseLatency: 0 * time.Millisecond,
+ slowResponseThreshold: 50 * time.Millisecond,
+ preferredResponseLatency: 71 * time.Millisecond,
+ secondaryResponseLatency: 1 * time.Millisecond,
expectLatencyExceedsThreshold: true,
fastestBackend: "secondary-backend",
slowestBackend: "preferred-backend",
},
+ "slowest response threshold is disabled (0)": {
+ slowResponseThreshold: 0 * time.Millisecond,
+ preferredResponseLatency: 200 * time.Millisecond,
+ secondaryResponseLatency: 100 * time.Millisecond,
+ expectLatencyExceedsThreshold: false,
+ },
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
- preferredBackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(200)
- time.Sleep(scenario.preferredResponseLatency)
- _, err := w.Write([]byte("preferred response"))
- require.NoError(t, err)
- }))
-
- defer preferredBackend.Close()
- preferredBackendURL, err := url.Parse(preferredBackend.URL)
- require.NoError(t, err)
-
- secondaryBackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(200)
- time.Sleep(scenario.secondaryResponseLatency)
- _, err := w.Write([]byte("preferred response"))
- require.NoError(t, err)
- }))
-
- defer secondaryBackend.Close()
- secondaryBackendURL, err := url.Parse(secondaryBackend.URL)
- require.NoError(t, err)
-
- backends := []*ProxyBackend{
- NewProxyBackend("preferred-backend", preferredBackendURL, time.Second, true, false),
- NewProxyBackend("secondary-backend", secondaryBackendURL, time.Second, false, false),
+ backends := []ProxyBackendInterface{
+ newMockProxyBackend("preferred-backend", time.Second, true, scenario.preferredResponseLatency),
+ newMockProxyBackend("secondary-backend", time.Second, false, scenario.secondaryResponseLatency),
}
logger := newMockLogger()
@@ -649,3 +631,41 @@ func (m *mockLogger) Log(keyvals ...interface{}) error {
return nil
}
+
+type mockProxyBackend struct {
+ name string
+ timeout time.Duration
+ preferred bool
+ fakeResponseLatency time.Duration
+}
+
+func newMockProxyBackend(name string, timeout time.Duration, preferred bool, fakeResponseLatency time.Duration) ProxyBackendInterface {
+ return &mockProxyBackend{
+ name: name,
+ timeout: timeout,
+ preferred: preferred,
+ fakeResponseLatency: fakeResponseLatency,
+ }
+}
+
+func (b *mockProxyBackend) Name() string {
+ return b.name
+}
+
+func (b *mockProxyBackend) Endpoint() *url.URL {
+ return nil
+}
+
+func (b *mockProxyBackend) Preferred() bool {
+ return b.preferred
+}
+
+func (b *mockProxyBackend) ForwardRequest(_ *http.Request, _ io.ReadCloser) (time.Duration, int, []byte, *http.Response, error) {
+ resp := &http.Response{
+ StatusCode: 200,
+ Header: make(http.Header),
+ Body: io.NopCloser(bytes.NewBufferString(`{}`)),
+ }
+ resp.Header.Set("Content-Type", "application/json")
+ return time.Duration(b.fakeResponseLatency), 200, []byte("{}"), resp, nil
+}
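
The scenarios above encode the intended semantics of `slowResponseThreshold`: a slow-query log entry is expected only when the latency gap between the two backends exceeds the threshold, and a threshold of zero disables the check. A minimal sketch of that comparison, inferred from the test expectations rather than copied from the querytee proxy implementation:

```go
// Sketch only: inferred from the test scenarios above, not taken from the
// actual querytee proxy code.
func latencyExceedsThreshold(threshold, preferred, secondary time.Duration) bool {
	if threshold <= 0 {
		return false // a threshold of 0 disables slow-query logging
	}
	diff := preferred - secondary
	if diff < 0 {
		diff = -diff
	}
	return diff > threshold
}
```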
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
index 85f9f57365f..fdff3fdb4cb 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -127,9 +127,10 @@ func Is(any *anypb.Any, m proto.Message) bool {
// The allocated message is stored in the embedded proto.Message.
//
// Example:
-// var x ptypes.DynamicAny
-// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
-// fmt.Printf("unmarshaled message: %v", x.Message)
+//
+// var x ptypes.DynamicAny
+// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+// fmt.Printf("unmarshaled message: %v", x.Message)
//
// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
// the any message contents into a new instance of the underlying message.
diff --git a/vendor/github.com/grafana/dskit/gate/gate.go b/vendor/github.com/grafana/dskit/gate/gate.go
index 1f915ecba12..fe050edade5 100644
--- a/vendor/github.com/grafana/dskit/gate/gate.go
+++ b/vendor/github.com/grafana/dskit/gate/gate.go
@@ -64,7 +64,7 @@ func NewInstrumented(reg prometheus.Registerer, maxConcurrent int, gate Gate) Ga
Name: "gate_queries_in_flight",
Help: "Number of queries that are currently in flight.",
}),
- duration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
+ duration: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
Name: "gate_duration_seconds",
Help: "How many seconds it took for queries to wait at the gate.",
Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120, 240, 360, 720},
@@ -72,7 +72,7 @@ func NewInstrumented(reg prometheus.Registerer, maxConcurrent int, gate Gate) Ga
NativeHistogramBucketFactor: 1.1,
NativeHistogramMaxBucketNumber: 100,
NativeHistogramMinResetDuration: time.Hour,
- }),
+ }, []string{"outcome"}),
}
g.max.Set(float64(maxConcurrent))
@@ -84,20 +84,28 @@ type instrumentedGate struct {
max prometheus.Gauge
inflight prometheus.Gauge
- duration prometheus.Histogram
+ duration *prometheus.HistogramVec
}
func (g *instrumentedGate) Start(ctx context.Context) error {
start := time.Now()
- defer func() {
- g.duration.Observe(time.Since(start).Seconds())
- }()
err := g.gate.Start(ctx)
if err != nil {
+ var reason string
+ switch {
+ case errors.Is(err, context.Canceled):
+ reason = "rejected_canceled"
+ case errors.Is(err, context.DeadlineExceeded):
+ reason = "rejected_deadline_exceeded"
+ default:
+ reason = "rejected_other"
+ }
+ g.duration.WithLabelValues(reason).Observe(time.Since(start).Seconds())
return err
}
+ g.duration.WithLabelValues("permitted").Observe(time.Since(start).Seconds())
g.inflight.Inc()
return nil
}
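
With the change above, `gate_duration_seconds` becomes a histogram vector labelled by `outcome`. A hypothetical usage sketch (assumptions: `g` is the instrumented gate returned by `NewInstrumented`, and `Done` is the usual release method on the dskit `Gate` interface):

```go
// Sketch: g is assumed to be the gate returned by NewInstrumented(reg, max, inner).
func doWithGate(ctx context.Context, g gate.Gate) error {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	if err := g.Start(ctx); err != nil {
		// Rejections are observed as gate_duration_seconds{outcome="rejected_canceled"},
		// {outcome="rejected_deadline_exceeded"} or {outcome="rejected_other"}.
		return err
	}
	// A successful Start is observed as gate_duration_seconds{outcome="permitted"}.
	defer g.Done()

	// ... do the work that is limited by the gate ...
	return nil
}
```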
diff --git a/vendor/github.com/grafana/dskit/middleware/grpc_logging.go b/vendor/github.com/grafana/dskit/middleware/grpc_logging.go
index feab3647432..68a2ce037ee 100644
--- a/vendor/github.com/grafana/dskit/middleware/grpc_logging.go
+++ b/vendor/github.com/grafana/dskit/middleware/grpc_logging.go
@@ -6,11 +6,12 @@ package middleware
import (
"context"
- "errors"
+ "fmt"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
+ "github.com/pkg/errors"
dskit_log "github.com/grafana/dskit/log"
@@ -24,16 +25,20 @@ const (
gRPC = "gRPC"
)
-// An error can implement ShouldLog() to control whether GRPCServerLog will log.
+// OptionalLogging is the interface that needs to be implemented by an error that wants to control whether
+// it should be logged by GRPCServerLog.
type OptionalLogging interface {
- ShouldLog(ctx context.Context, duration time.Duration) bool
+	// ShouldLog returns whether the error should be logged and the reason. For example, if the error is sampled,
+	// the returned reason could be something like "sampled 1/10". The reason, if any, is used to decorate the error
+	// both when the error is logged and when it is skipped.
+ ShouldLog(ctx context.Context) (bool, string)
}
type DoNotLogError struct{ Err error }
-func (i DoNotLogError) Error() string { return i.Err.Error() }
-func (i DoNotLogError) Unwrap() error { return i.Err }
-func (i DoNotLogError) ShouldLog(_ context.Context, _ time.Duration) bool { return false }
+func (i DoNotLogError) Error() string { return i.Err.Error() }
+func (i DoNotLogError) Unwrap() error { return i.Err }
+func (i DoNotLogError) ShouldLog(_ context.Context) (bool, string) { return false, "" }
// GRPCServerLog logs grpc requests, errors, and latency.
type GRPCServerLog struct {
@@ -50,8 +55,13 @@ func (s GRPCServerLog) UnaryServerInterceptor(ctx context.Context, req interface
if err == nil && s.DisableRequestSuccessLog {
return resp, nil
}
- var optional OptionalLogging
- if errors.As(err, &optional) && !optional.ShouldLog(ctx, time.Since(begin)) {
+
+ // Honor sampled error logging.
+ keep, reason := shouldLog(ctx, err)
+ if reason != "" {
+ err = fmt.Errorf("%w (%s)", err, reason)
+ }
+ if !keep {
return resp, err
}
@@ -91,3 +101,12 @@ func (s GRPCServerLog) StreamServerInterceptor(srv interface{}, ss grpc.ServerSt
}
return err
}
+
+func shouldLog(ctx context.Context, err error) (bool, string) {
+ var optional OptionalLogging
+ if !errors.As(err, &optional) {
+ return true, ""
+ }
+
+ return optional.ShouldLog(ctx)
+}
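
The new `ShouldLog(ctx) (bool, string)` contract lets an error both decide whether it is logged and report why; the interceptor above appends that reason to the error text either way. A hypothetical implementation of a sampled error under this contract (the type and its fields are illustrative, not part of dskit):

```go
package example

import (
	"context"
	"fmt"
	"sync/atomic"
)

// sampledError logs roughly one occurrence out of every `every` calls.
type sampledError struct {
	err   error
	count atomic.Int64
	every int64
}

func (e *sampledError) Error() string { return e.err.Error() }
func (e *sampledError) Unwrap() error { return e.err }

// ShouldLog satisfies the OptionalLogging interface shown above.
func (e *sampledError) ShouldLog(_ context.Context) (bool, string) {
	n := e.count.Add(1)
	return n%e.every == 1, fmt.Sprintf("sampled 1/%d", e.every)
}
```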
diff --git a/vendor/github.com/grafana/dskit/ring/partition_instance_lifecycler.go b/vendor/github.com/grafana/dskit/ring/partition_instance_lifecycler.go
index 9ad31a54f26..09fef722337 100644
--- a/vendor/github.com/grafana/dskit/ring/partition_instance_lifecycler.go
+++ b/vendor/github.com/grafana/dskit/ring/partition_instance_lifecycler.go
@@ -23,7 +23,7 @@ var (
allowedPartitionStateChanges = map[PartitionState][]PartitionState{
PartitionPending: {PartitionActive, PartitionInactive},
PartitionActive: {PartitionInactive},
- PartitionInactive: {PartitionPending, PartitionActive},
+ PartitionInactive: {PartitionActive},
}
)
diff --git a/vendor/github.com/grafana/dskit/server/PROXYPROTOCOL.md b/vendor/github.com/grafana/dskit/server/PROXYPROTOCOL.md
new file mode 100644
index 00000000000..726bde758dc
--- /dev/null
+++ b/vendor/github.com/grafana/dskit/server/PROXYPROTOCOL.md
@@ -0,0 +1,28 @@
+# PROXY protocol support
+
+> **Note:** enabling PROXY protocol support does not break existing setups (e.g. non-PROXY connections are still accepted); however, it does add a small overhead to connection handling.
+
+To enable PROXY protocol support, set `Config.ProxyProtocolEnabled` to `true` before initializing a `Server` in your application. This enables PROXY protocol for both HTTP and gRPC servers.
+
+```go
+cfg := &Config{
+ ProxyProtocolEnabled: true,
+ // ...
+}
+
+server := NewServer(cfg)
+// ...
+```
+
+PROXY protocol is supported by using [go-proxyproto](https://github.com/pires/go-proxyproto).
+Both PROXY v1 and PROXY v2 are supported out of the box.
+
+When enabled, incoming connections are checked for the PROXY header, and if present, the connection information is updated to reflect the original source address.
+Most commonly, you will use the source address via [Request.RemoteAddr](https://pkg.go.dev/net/http#Request.RemoteAddr).
+
+```go
+server.HTTP.HandleFunc("/your-endpoint", func(w http.ResponseWriter, r *http.Request) {
+ ip, _, err := net.SplitHostPort(r.RemoteAddr)
+ // ...
+})
+```
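
The document above only shows the HTTP side; for gRPC handlers served behind the same PROXY-enabled listener, the rewritten caller address is available through the standard `peer` package. A hypothetical sketch (`callerIP` is an illustrative helper, not part of the vendored docs):

```go
import (
	"context"

	"google.golang.org/grpc/peer"
)

// callerIP returns the remote address as seen after PROXY header rewriting.
func callerIP(ctx context.Context) string {
	p, ok := peer.FromContext(ctx)
	if !ok || p.Addr == nil {
		return ""
	}
	return p.Addr.String() // typically "ip:port"
}
```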
diff --git a/vendor/github.com/grafana/dskit/server/fake_server.pb.go b/vendor/github.com/grafana/dskit/server/fake_server.pb.go
index 75ee6b0a14e..4bb2d5a1f39 100644
--- a/vendor/github.com/grafana/dskit/server/fake_server.pb.go
+++ b/vendor/github.com/grafana/dskit/server/fake_server.pb.go
@@ -29,6 +29,49 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+type ProxyProtoIPResponse struct {
+ IP string `protobuf:"bytes,1,opt,name=IP,proto3" json:"IP,omitempty"`
+}
+
+func (m *ProxyProtoIPResponse) Reset() { *m = ProxyProtoIPResponse{} }
+func (*ProxyProtoIPResponse) ProtoMessage() {}
+func (*ProxyProtoIPResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_a932e7b7b9f5c118, []int{0}
+}
+func (m *ProxyProtoIPResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProxyProtoIPResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ProxyProtoIPResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ProxyProtoIPResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProxyProtoIPResponse.Merge(m, src)
+}
+func (m *ProxyProtoIPResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProxyProtoIPResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProxyProtoIPResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProxyProtoIPResponse proto.InternalMessageInfo
+
+func (m *ProxyProtoIPResponse) GetIP() string {
+ if m != nil {
+ return m.IP
+ }
+ return ""
+}
+
type FailWithHTTPErrorRequest struct {
Code int32 `protobuf:"varint,1,opt,name=Code,proto3" json:"Code,omitempty"`
}
@@ -36,7 +79,7 @@ type FailWithHTTPErrorRequest struct {
func (m *FailWithHTTPErrorRequest) Reset() { *m = FailWithHTTPErrorRequest{} }
func (*FailWithHTTPErrorRequest) ProtoMessage() {}
func (*FailWithHTTPErrorRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_a932e7b7b9f5c118, []int{0}
+ return fileDescriptor_a932e7b7b9f5c118, []int{1}
}
func (m *FailWithHTTPErrorRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -73,32 +116,61 @@ func (m *FailWithHTTPErrorRequest) GetCode() int32 {
}
func init() {
+ proto.RegisterType((*ProxyProtoIPResponse)(nil), "server.ProxyProtoIPResponse")
proto.RegisterType((*FailWithHTTPErrorRequest)(nil), "server.FailWithHTTPErrorRequest")
}
func init() { proto.RegisterFile("fake_server.proto", fileDescriptor_a932e7b7b9f5c118) }
var fileDescriptor_a932e7b7b9f5c118 = []byte{
- // 265 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x4b, 0xcc, 0x4e,
- 0x8d, 0x2f, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x83,
- 0xf0, 0xa4, 0xa4, 0xd3, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xc1, 0xa2, 0x49, 0xa5, 0x69, 0xfa,
- 0xa9, 0xb9, 0x05, 0x25, 0x95, 0x10, 0x45, 0x4a, 0x7a, 0x5c, 0x12, 0x6e, 0x89, 0x99, 0x39, 0xe1,
- 0x99, 0x25, 0x19, 0x1e, 0x21, 0x21, 0x01, 0xae, 0x45, 0x45, 0xf9, 0x45, 0x41, 0xa9, 0x85, 0xa5,
- 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0xce, 0xf9, 0x29, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a,
- 0xac, 0x41, 0x60, 0xb6, 0xd1, 0x6d, 0x26, 0x2e, 0x2e, 0xb7, 0xc4, 0xec, 0xd4, 0x60, 0xb0, 0xd9,
- 0x42, 0xd6, 0x5c, 0xec, 0xc1, 0xa5, 0xc9, 0xc9, 0xa9, 0xa9, 0x29, 0x42, 0x62, 0x7a, 0x10, 0x7b,
- 0xf4, 0x60, 0xf6, 0xe8, 0xb9, 0x82, 0xec, 0x91, 0xc2, 0x21, 0xae, 0xc4, 0x20, 0xe4, 0xc8, 0xc5,
- 0x0b, 0xb3, 0x1b, 0x6c, 0x2f, 0x19, 0x46, 0xf8, 0x73, 0x09, 0x62, 0x38, 0x5f, 0x48, 0x41, 0x0f,
- 0x1a, 0x0e, 0xb8, 0x7c, 0x86, 0xc7, 0x40, 0x4b, 0x2e, 0xd6, 0xe0, 0x9c, 0xd4, 0xd4, 0x02, 0xb2,
- 0xbc, 0xc3, 0x1d, 0x5c, 0x52, 0x94, 0x9a, 0x98, 0x4b, 0xa6, 0x01, 0x06, 0x8c, 0x4e, 0x26, 0x17,
- 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6,
- 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39,
- 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63,
- 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0x92, 0xd8, 0xc0, 0x26, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff,
- 0xff, 0x43, 0x2b, 0x71, 0x6d, 0x04, 0x02, 0x00, 0x00,
-}
+ // 330 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xb1, 0x4e, 0x02, 0x41,
+ 0x10, 0x86, 0x77, 0x51, 0x30, 0xae, 0xd1, 0x84, 0x8d, 0x31, 0x04, 0xcd, 0x84, 0x5c, 0x61, 0xac,
+ 0x0e, 0xa3, 0x36, 0xc6, 0x4a, 0x09, 0xc4, 0xab, 0xdc, 0xdc, 0x91, 0x58, 0x9a, 0x03, 0x06, 0x24,
+ 0x1c, 0xec, 0xb9, 0x77, 0x67, 0xa4, 0xf3, 0x11, 0x7c, 0x0c, 0x3b, 0x5f, 0xc3, 0x92, 0x92, 0x52,
+ 0x96, 0xc6, 0x92, 0x47, 0x30, 0x2c, 0x12, 0x0b, 0xc5, 0xe2, 0xba, 0x9d, 0xc9, 0xe4, 0xff, 0xbf,
+ 0x7f, 0x7f, 0x96, 0x6f, 0xfb, 0x3d, 0xbc, 0x8b, 0x50, 0x3d, 0xa2, 0xb2, 0x43, 0x25, 0x63, 0xc9,
+ 0x73, 0x8b, 0xa9, 0xb8, 0xdf, 0x91, 0xb2, 0x13, 0x60, 0xd9, 0x6c, 0x1b, 0x49, 0xbb, 0x8c, 0xfd,
+ 0x30, 0x1e, 0x2e, 0x8e, 0xac, 0x43, 0xb6, 0x2b, 0x94, 0x7c, 0x1a, 0x8a, 0xf9, 0xe4, 0x08, 0x17,
+ 0xa3, 0x50, 0x0e, 0x22, 0xe4, 0x3b, 0x2c, 0xe3, 0x88, 0x02, 0x2d, 0xd1, 0xa3, 0x4d, 0x37, 0xe3,
+ 0x08, 0xcb, 0x66, 0x85, 0x9a, 0xdf, 0x0d, 0x6e, 0xbb, 0xf1, 0xfd, 0x75, 0xbd, 0x2e, 0xaa, 0x4a,
+ 0x49, 0xe5, 0xe2, 0x43, 0x82, 0x51, 0xcc, 0x39, 0x5b, 0xaf, 0xc8, 0x16, 0x9a, 0xeb, 0xac, 0x6b,
+ 0xde, 0x27, 0x6f, 0x6b, 0x8c, 0xd5, 0xfc, 0x1e, 0x7a, 0x86, 0x81, 0x5f, 0xb0, 0x0d, 0x2f, 0x69,
+ 0x36, 0x11, 0x5b, 0x7c, 0xcf, 0x5e, 0xf0, 0xd8, 0x4b, 0x1e, 0xbb, 0x3a, 0xe7, 0x29, 0xae, 0xd8,
+ 0x5b, 0x84, 0x5f, 0xb2, 0xed, 0xa5, 0xb7, 0xf1, 0x4d, 0x21, 0x71, 0xc3, 0xf2, 0xbf, 0xf0, 0x79,
+ 0xc9, 0xfe, 0xfe, 0xaf, 0x55, 0xc9, 0xfe, 0x11, 0x3c, 0x67, 0x59, 0x2f, 0x40, 0x0c, 0x53, 0xc5,
+ 0xd9, 0xf2, 0x62, 0x85, 0x7e, 0x3f, 0xa5, 0xc0, 0x31, 0xe5, 0x2e, 0x2b, 0xb8, 0x18, 0x27, 0x6a,
+ 0xf0, 0xd3, 0x5d, 0xc5, 0x0f, 0x02, 0x54, 0x8e, 0x58, 0xa9, 0x77, 0xb0, 0x4c, 0xfb, 0x57, 0xdf,
+ 0x16, 0xb9, 0x3a, 0x1b, 0x4d, 0x80, 0x8c, 0x27, 0x40, 0x66, 0x13, 0xa0, 0xcf, 0x1a, 0xe8, 0xab,
+ 0x06, 0xfa, 0xae, 0x81, 0x8e, 0x34, 0xd0, 0x0f, 0x0d, 0xf4, 0x53, 0x03, 0x99, 0x69, 0xa0, 0x2f,
+ 0x53, 0x20, 0xa3, 0x29, 0x90, 0xf1, 0x14, 0x48, 0x23, 0x67, 0x5c, 0x4e, 0xbf, 0x02, 0x00, 0x00,
+ 0xff, 0xff, 0xf3, 0x3d, 0xce, 0x89, 0x80, 0x02, 0x00, 0x00,
+}
+
+func (this *ProxyProtoIPResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+ that1, ok := that.(*ProxyProtoIPResponse)
+ if !ok {
+ that2, ok := that.(ProxyProtoIPResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.IP != that1.IP {
+ return false
+ }
+ return true
+}
func (this *FailWithHTTPErrorRequest) Equal(that interface{}) bool {
if that == nil {
return this == nil
@@ -123,6 +195,16 @@ func (this *FailWithHTTPErrorRequest) Equal(that interface{}) bool {
}
return true
}
+func (this *ProxyProtoIPResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&server.ProxyProtoIPResponse{")
+ s = append(s, "IP: "+fmt.Sprintf("%#v", this.IP)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
func (this *FailWithHTTPErrorRequest) GoString() string {
if this == nil {
return "nil"
@@ -159,6 +241,7 @@ type FakeServerClient interface {
FailWithHTTPError(ctx context.Context, in *FailWithHTTPErrorRequest, opts ...grpc.CallOption) (*empty.Empty, error)
Sleep(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error)
StreamSleep(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (FakeServer_StreamSleepClient, error)
+ ReturnProxyProtoCallerIP(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ProxyProtoIPResponse, error)
}
type fakeServerClient struct {
@@ -237,6 +320,15 @@ func (x *fakeServerStreamSleepClient) Recv() (*empty.Empty, error) {
return m, nil
}
+func (c *fakeServerClient) ReturnProxyProtoCallerIP(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ProxyProtoIPResponse, error) {
+ out := new(ProxyProtoIPResponse)
+ err := c.cc.Invoke(ctx, "/server.FakeServer/ReturnProxyProtoCallerIP", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// FakeServerServer is the server API for FakeServer service.
type FakeServerServer interface {
Succeed(context.Context, *empty.Empty) (*empty.Empty, error)
@@ -244,6 +336,7 @@ type FakeServerServer interface {
FailWithHTTPError(context.Context, *FailWithHTTPErrorRequest) (*empty.Empty, error)
Sleep(context.Context, *empty.Empty) (*empty.Empty, error)
StreamSleep(*empty.Empty, FakeServer_StreamSleepServer) error
+ ReturnProxyProtoCallerIP(context.Context, *empty.Empty) (*ProxyProtoIPResponse, error)
}
// UnimplementedFakeServerServer can be embedded to have forward compatible implementations.
@@ -265,6 +358,9 @@ func (*UnimplementedFakeServerServer) Sleep(ctx context.Context, req *empty.Empt
func (*UnimplementedFakeServerServer) StreamSleep(req *empty.Empty, srv FakeServer_StreamSleepServer) error {
return status.Errorf(codes.Unimplemented, "method StreamSleep not implemented")
}
+func (*UnimplementedFakeServerServer) ReturnProxyProtoCallerIP(ctx context.Context, req *empty.Empty) (*ProxyProtoIPResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ReturnProxyProtoCallerIP not implemented")
+}
func RegisterFakeServerServer(s *grpc.Server, srv FakeServerServer) {
s.RegisterService(&_FakeServer_serviceDesc, srv)
@@ -363,6 +459,24 @@ func (x *fakeServerStreamSleepServer) Send(m *empty.Empty) error {
return x.ServerStream.SendMsg(m)
}
+func _FakeServer_ReturnProxyProtoCallerIP_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(empty.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(FakeServerServer).ReturnProxyProtoCallerIP(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/server.FakeServer/ReturnProxyProtoCallerIP",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(FakeServerServer).ReturnProxyProtoCallerIP(ctx, req.(*empty.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _FakeServer_serviceDesc = grpc.ServiceDesc{
ServiceName: "server.FakeServer",
HandlerType: (*FakeServerServer)(nil),
@@ -383,6 +497,10 @@ var _FakeServer_serviceDesc = grpc.ServiceDesc{
MethodName: "Sleep",
Handler: _FakeServer_Sleep_Handler,
},
+ {
+ MethodName: "ReturnProxyProtoCallerIP",
+ Handler: _FakeServer_ReturnProxyProtoCallerIP_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
@@ -394,6 +512,36 @@ var _FakeServer_serviceDesc = grpc.ServiceDesc{
Metadata: "fake_server.proto",
}
+func (m *ProxyProtoIPResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProxyProtoIPResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProxyProtoIPResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.IP) > 0 {
+ i -= len(m.IP)
+ copy(dAtA[i:], m.IP)
+ i = encodeVarintFakeServer(dAtA, i, uint64(len(m.IP)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *FailWithHTTPErrorRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -433,6 +581,19 @@ func encodeVarintFakeServer(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return base
}
+func (m *ProxyProtoIPResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.IP)
+ if l > 0 {
+ n += 1 + l + sovFakeServer(uint64(l))
+ }
+ return n
+}
+
func (m *FailWithHTTPErrorRequest) Size() (n int) {
if m == nil {
return 0
@@ -451,6 +612,16 @@ func sovFakeServer(x uint64) (n int) {
func sozFakeServer(x uint64) (n int) {
return sovFakeServer(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
+func (this *ProxyProtoIPResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ProxyProtoIPResponse{`,
+ `IP:` + fmt.Sprintf("%v", this.IP) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *FailWithHTTPErrorRequest) String() string {
if this == nil {
return "nil"
@@ -469,6 +640,91 @@ func valueToStringFakeServer(v interface{}) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
+func (m *ProxyProtoIPResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFakeServer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProxyProtoIPResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProxyProtoIPResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFakeServer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthFakeServer
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthFakeServer
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IP = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipFakeServer(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthFakeServer
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthFakeServer
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *FailWithHTTPErrorRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/vendor/github.com/grafana/dskit/server/fake_server.proto b/vendor/github.com/grafana/dskit/server/fake_server.proto
index 248a6f244bd..0c4780cda0d 100644
--- a/vendor/github.com/grafana/dskit/server/fake_server.proto
+++ b/vendor/github.com/grafana/dskit/server/fake_server.proto
@@ -10,6 +10,11 @@ service FakeServer {
rpc FailWithHTTPError(FailWithHTTPErrorRequest) returns (google.protobuf.Empty) {};
rpc Sleep(google.protobuf.Empty) returns (google.protobuf.Empty) {};
rpc StreamSleep(google.protobuf.Empty) returns (stream google.protobuf.Empty) {};
+ rpc ReturnProxyProtoCallerIP(google.protobuf.Empty) returns (ProxyProtoIPResponse) {};
+}
+
+message ProxyProtoIPResponse {
+ string IP = 1;
}
message FailWithHTTPErrorRequest {
diff --git a/vendor/github.com/grafana/dskit/server/server.go b/vendor/github.com/grafana/dskit/server/server.go
index effe2e54eaf..c39e3873c9e 100644
--- a/vendor/github.com/grafana/dskit/server/server.go
+++ b/vendor/github.com/grafana/dskit/server/server.go
@@ -17,13 +17,13 @@ import (
"strings"
"time"
- _ "github.com/grafana/pyroscope-go/godeltaprof/http/pprof" // anonymous import to get godelatprof handlers registered
-
gokit_log "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/gorilla/mux"
+	_ "github.com/grafana/pyroscope-go/godeltaprof/http/pprof" // anonymous import to get godeltaprof handlers registered
otgrpc "github.com/opentracing-contrib/go-grpc"
"github.com/opentracing/opentracing-go"
+ "github.com/pires/go-proxyproto"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/config"
@@ -31,6 +31,7 @@ import (
"golang.org/x/net/netutil"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/experimental"
"google.golang.org/grpc/keepalive"
"github.com/grafana/dskit/httpgrpc"
@@ -79,14 +80,15 @@ type Config struct {
// for details. A generally useful value is 1.1.
MetricsNativeHistogramFactor float64 `yaml:"-"`
- HTTPListenNetwork string `yaml:"http_listen_network"`
- HTTPListenAddress string `yaml:"http_listen_address"`
- HTTPListenPort int `yaml:"http_listen_port"`
- HTTPConnLimit int `yaml:"http_listen_conn_limit"`
- GRPCListenNetwork string `yaml:"grpc_listen_network"`
- GRPCListenAddress string `yaml:"grpc_listen_address"`
- GRPCListenPort int `yaml:"grpc_listen_port"`
- GRPCConnLimit int `yaml:"grpc_listen_conn_limit"`
+ HTTPListenNetwork string `yaml:"http_listen_network"`
+ HTTPListenAddress string `yaml:"http_listen_address"`
+ HTTPListenPort int `yaml:"http_listen_port"`
+ HTTPConnLimit int `yaml:"http_listen_conn_limit"`
+ GRPCListenNetwork string `yaml:"grpc_listen_network"`
+ GRPCListenAddress string `yaml:"grpc_listen_address"`
+ GRPCListenPort int `yaml:"grpc_listen_port"`
+ GRPCConnLimit int `yaml:"grpc_listen_conn_limit"`
+ ProxyProtocolEnabled bool `yaml:"proxy_protocol_enabled"`
CipherSuites string `yaml:"tls_cipher_suites"`
MinVersion string `yaml:"tls_min_version"`
@@ -125,6 +127,8 @@ type Config struct {
GRPCServerMinTimeBetweenPings time.Duration `yaml:"grpc_server_min_time_between_pings"`
GRPCServerPingWithoutStreamAllowed bool `yaml:"grpc_server_ping_without_stream_allowed"`
GRPCServerNumWorkers int `yaml:"grpc_server_num_workers"`
+ GRPCServerStatsTrackingEnabled bool `yaml:"grpc_server_stats_tracking_enabled"`
+ GRPCServerRecvBufferPoolsEnabled bool `yaml:"grpc_server_recv_buffer_pools_enabled"`
LogFormat string `yaml:"log_format"`
LogLevel log.Level `yaml:"log_level"`
@@ -190,6 +194,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.DurationVar(&cfg.GRPCServerTimeout, "server.grpc.keepalive.timeout", time.Second*20, "After having pinged for keepalive check, the duration after which an idle connection should be closed, Default: 20s")
f.DurationVar(&cfg.GRPCServerMinTimeBetweenPings, "server.grpc.keepalive.min-time-between-pings", 5*time.Minute, "Minimum amount of time a client should wait before sending a keepalive ping. If client sends keepalive ping more often, server will send GOAWAY and close the connection.")
f.BoolVar(&cfg.GRPCServerPingWithoutStreamAllowed, "server.grpc.keepalive.ping-without-stream-allowed", false, "If true, server allows keepalive pings even when there are no active streams(RPCs). If false, and client sends ping when there are no active streams, server will send GOAWAY and close the connection.")
+ f.BoolVar(&cfg.GRPCServerStatsTrackingEnabled, "server.grpc.stats-tracking-enabled", true, "If true, the request_message_bytes, response_message_bytes, and inflight_requests metrics will be tracked. Enabling this option prevents the use of memory pools for parsing gRPC request bodies and may lead to more memory allocations.")
+	f.BoolVar(&cfg.GRPCServerRecvBufferPoolsEnabled, "server.grpc.recv-buffer-pools-enabled", false, "If true, gRPC's buffer pools will be used to handle incoming requests. Enabling this feature can reduce memory allocation, but also requires disabling GRPC server stats tracking by setting `server.grpc.stats-tracking-enabled=false`. This is an experimental gRPC feature, so it might be removed in a future version of the gRPC library.")
f.IntVar(&cfg.GRPCServerNumWorkers, "server.grpc.num-workers", 0, "If non-zero, configures the amount of GRPC server workers used to serve the requests.")
f.StringVar(&cfg.PathPrefix, "server.path-prefix", "", "Base path to serve all API routes from (e.g. /v1/)")
f.StringVar(&cfg.LogFormat, "log.format", log.LogfmtFormat, "Output log messages in the given format. Valid formats: [logfmt, json]")
@@ -201,6 +207,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.BoolVar(&cfg.LogRequestHeaders, "server.log-request-headers", false, "Optionally log request headers.")
f.StringVar(&cfg.LogRequestExcludeHeadersList, "server.log-request-headers-exclude-list", "", "Comma separated list of headers to exclude from loggin. Only used if server.log-request-headers is true.")
f.BoolVar(&cfg.LogRequestAtInfoLevel, "server.log-request-at-info-level-enabled", false, "Optionally log requests at info level instead of debug level. Applies to request headers as well if server.log-request-headers is enabled.")
+ f.BoolVar(&cfg.ProxyProtocolEnabled, "server.proxy-protocol-enabled", false, "Enables PROXY protocol.")
}
func (cfg *Config) registererOrDefault() prometheus.Registerer {
@@ -286,6 +293,11 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) {
grpcListener = netutil.LimitListener(grpcListener, cfg.GRPCConnLimit)
}
+ if cfg.ProxyProtocolEnabled {
+ httpListener = newProxyProtocolListener(httpListener, cfg.HTTPServerReadHeaderTimeout)
+ grpcListener = newProxyProtocolListener(grpcListener, cfg.HTTPServerReadHeaderTimeout)
+ }
+
cipherSuites, err := stringToCipherSuites(cfg.CipherSuites)
if err != nil {
return nil, err
@@ -407,13 +419,22 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) {
grpcOptions = append(grpcOptions, grpc.InTapHandle(grpcServerLimit.TapHandle), grpc.StatsHandler(grpcServerLimit))
}
- grpcOptions = append(grpcOptions,
- grpc.StatsHandler(middleware.NewStatsHandler(
- metrics.ReceivedMessageSize,
- metrics.SentMessageSize,
- metrics.InflightRequests,
- )),
- )
+ if cfg.GRPCServerStatsTrackingEnabled {
+ grpcOptions = append(grpcOptions,
+ grpc.StatsHandler(middleware.NewStatsHandler(
+ metrics.ReceivedMessageSize,
+ metrics.SentMessageSize,
+ metrics.InflightRequests,
+ )),
+ )
+ }
+
+ if cfg.GRPCServerRecvBufferPoolsEnabled {
+ if cfg.GRPCServerStatsTrackingEnabled {
+ return nil, fmt.Errorf("grpc_server_stats_tracking_enabled must be set to false if grpc_server_recv_buffer_pools_enabled is true")
+ }
+ grpcOptions = append(grpcOptions, experimental.RecvBufferPool(grpc.NewSharedBufferPool()))
+ }
grpcOptions = append(grpcOptions, cfg.GRPCOptions...)
if grpcTLSConfig != nil {
@@ -592,3 +613,13 @@ func (s *Server) Shutdown() {
_ = s.HTTPServer.Shutdown(ctx)
s.GRPC.GracefulStop()
}
+
+func newProxyProtocolListener(httpListener net.Listener, readHeaderTimeout time.Duration) net.Listener {
+ // Wraps the listener with a proxy protocol listener.
+ // NOTE: go-proxyproto supports non-PROXY, PROXY v1 and PROXY v2 protocols via the same listener.
+ // Therefore, enabling this feature does not break existing setups.
+ return &proxyproto.Listener{
+ Listener: httpListener,
+ ReadHeaderTimeout: readHeaderTimeout,
+ }
+}
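
Taken together, the new options can be exercised by an application configuring a dskit server. A hypothetical sketch (field names come from the `Config` changes above; the listen ports are illustrative and `server.New` is assumed to be the usual dskit constructor):

```go
cfg := server.Config{
	HTTPListenPort: 8080,
	GRPCListenPort: 9095,

	// Accept PROXY v1/v2 headers on both the HTTP and gRPC listeners.
	ProxyProtocolEnabled: true,

	// Experimental: reuse gRPC receive buffers. newServer rejects the
	// combination of recv buffer pools and stats tracking, so the latter
	// must be switched off explicitly.
	GRPCServerStatsTrackingEnabled:   false,
	GRPCServerRecvBufferPoolsEnabled: true,
}

srv, err := server.New(cfg)
if err != nil {
	log.Fatalf("failed to create server: %v", err)
}
defer srv.Shutdown()
```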
diff --git a/vendor/github.com/pires/go-proxyproto/.gitignore b/vendor/github.com/pires/go-proxyproto/.gitignore
new file mode 100644
index 00000000000..a2d2c301976
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/.gitignore
@@ -0,0 +1,11 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+.idea
+bin
+pkg
+
+*.out
diff --git a/vendor/github.com/pires/go-proxyproto/LICENSE b/vendor/github.com/pires/go-proxyproto/LICENSE
new file mode 100644
index 00000000000..a65c05a6271
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016 Paulo Pires
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/pires/go-proxyproto/README.md b/vendor/github.com/pires/go-proxyproto/README.md
new file mode 100644
index 00000000000..982707cceef
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/README.md
@@ -0,0 +1,162 @@
+# go-proxyproto
+
+[![Actions Status](https://github.com/pires/go-proxyproto/workflows/test/badge.svg)](https://github.com/pires/go-proxyproto/actions)
+[![Coverage Status](https://coveralls.io/repos/github/pires/go-proxyproto/badge.svg?branch=master)](https://coveralls.io/github/pires/go-proxyproto?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pires/go-proxyproto)](https://goreportcard.com/report/github.com/pires/go-proxyproto)
+[![](https://godoc.org/github.com/pires/go-proxyproto?status.svg)](https://pkg.go.dev/github.com/pires/go-proxyproto?tab=doc)
+
+
+A Go library implementation of the [PROXY protocol, versions 1 and 2](https://www.haproxy.org/download/2.3/doc/proxy-protocol.txt),
+which provides, as per specification:
+> (...) a convenient way to safely transport connection
+> information such as a client's address across multiple layers of NAT or TCP
+> proxies. It is designed to require little changes to existing components and
+> to limit the performance impact caused by the processing of the transported
+> information.
+
+This library can be used by proxy clients, proxy servers, or both, when they need to support said protocol.
+Both protocol versions, 1 (text-based) and 2 (binary-based), are supported.
+
+## Installation
+
+```shell
+$ go get -u github.com/pires/go-proxyproto
+```
+
+## Usage
+
+### Client
+
+```go
+package main
+
+import (
+ "io"
+ "log"
+ "net"
+
+ proxyproto "github.com/pires/go-proxyproto"
+)
+
+func chkErr(err error) {
+ if err != nil {
+ log.Fatalf("Error: %s", err.Error())
+ }
+}
+
+func main() {
+ // Dial some proxy listener e.g. https://github.com/mailgun/proxyproto
+ target, err := net.ResolveTCPAddr("tcp", "127.0.0.1:2319")
+ chkErr(err)
+
+ conn, err := net.DialTCP("tcp", nil, target)
+ chkErr(err)
+
+ defer conn.Close()
+
+ // Create a proxyprotocol header or use HeaderProxyFromAddrs() if you
+ // have two conn's
+ header := &proxyproto.Header{
+ Version: 1,
+ Command: proxyproto.PROXY,
+ TransportProtocol: proxyproto.TCPv4,
+ SourceAddr: &net.TCPAddr{
+ IP: net.ParseIP("10.1.1.1"),
+ Port: 1000,
+ },
+ DestinationAddr: &net.TCPAddr{
+ IP: net.ParseIP("20.2.2.2"),
+ Port: 2000,
+ },
+ }
+ // After the connection was created write the proxy headers first
+ _, err = header.WriteTo(conn)
+ chkErr(err)
+ // Then your data... e.g.:
+ _, err = io.WriteString(conn, "HELO")
+ chkErr(err)
+}
+```
+
+### Server
+
+```go
+package main
+
+import (
+ "log"
+ "net"
+
+ proxyproto "github.com/pires/go-proxyproto"
+)
+
+func main() {
+ // Create a listener
+ addr := "localhost:9876"
+ list, err := net.Listen("tcp", addr)
+ if err != nil {
+ log.Fatalf("couldn't listen to %q: %q\n", addr, err.Error())
+ }
+
+ // Wrap listener in a proxyproto listener
+ proxyListener := &proxyproto.Listener{Listener: list}
+ defer proxyListener.Close()
+
+ // Wait for a connection and accept it
+ conn, err := proxyListener.Accept()
+ defer conn.Close()
+
+ // Print connection details
+ if conn.LocalAddr() == nil {
+ log.Fatal("couldn't retrieve local address")
+ }
+ log.Printf("local address: %q", conn.LocalAddr().String())
+
+ if conn.RemoteAddr() == nil {
+ log.Fatal("couldn't retrieve remote address")
+ }
+ log.Printf("remote address: %q", conn.RemoteAddr().String())
+}
+```
+
+### HTTP Server
+```go
+package main
+
+import (
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/pires/go-proxyproto"
+)
+
+func main() {
+ server := http.Server{
+ Addr: ":8080",
+ }
+
+ ln, err := net.Listen("tcp", server.Addr)
+ if err != nil {
+ panic(err)
+ }
+
+ proxyListener := &proxyproto.Listener{
+ Listener: ln,
+ ReadHeaderTimeout: 10 * time.Second,
+ }
+ defer proxyListener.Close()
+
+ server.Serve(proxyListener)
+}
+```
+
+## Special notes
+
+### AWS
+
+AWS Network Load Balancer (NLB) does not push the PPV2 header until the client starts sending data. This is a problem if your server speaks first (e.g. SMTP, FTP, SSH).
+
+By default, NLB target group attribute `proxy_protocol_v2.client_to_server.header_placement` has the value `on_first_ack_with_payload`. You need to contact AWS support to change it to `on_first_ack`, instead.
+
+Just to be clear, you need this fix only if your server is designed to speak first.
diff --git a/vendor/github.com/pires/go-proxyproto/addr_proto.go b/vendor/github.com/pires/go-proxyproto/addr_proto.go
new file mode 100644
index 00000000000..d254fc41317
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/addr_proto.go
@@ -0,0 +1,62 @@
+package proxyproto
+
+// AddressFamilyAndProtocol represents address family and transport protocol.
+type AddressFamilyAndProtocol byte
+
+const (
+ UNSPEC AddressFamilyAndProtocol = '\x00'
+ TCPv4 AddressFamilyAndProtocol = '\x11'
+ UDPv4 AddressFamilyAndProtocol = '\x12'
+ TCPv6 AddressFamilyAndProtocol = '\x21'
+ UDPv6 AddressFamilyAndProtocol = '\x22'
+ UnixStream AddressFamilyAndProtocol = '\x31'
+ UnixDatagram AddressFamilyAndProtocol = '\x32'
+)
+
+// IsIPv4 returns true if the address family is IPv4 (AF_INET4), false otherwise.
+func (ap AddressFamilyAndProtocol) IsIPv4() bool {
+ return ap&0xF0 == 0x10
+}
+
+// IsIPv6 returns true if the address family is IPv6 (AF_INET6), false otherwise.
+func (ap AddressFamilyAndProtocol) IsIPv6() bool {
+ return ap&0xF0 == 0x20
+}
+
+// IsUnix returns true if the address family is UNIX (AF_UNIX), false otherwise.
+func (ap AddressFamilyAndProtocol) IsUnix() bool {
+ return ap&0xF0 == 0x30
+}
+
+// IsStream returns true if the transport protocol is TCP or STREAM (SOCK_STREAM), false otherwise.
+func (ap AddressFamilyAndProtocol) IsStream() bool {
+ return ap&0x0F == 0x01
+}
+
+// IsDatagram returns true if the transport protocol is UDP or DGRAM (SOCK_DGRAM), false otherwise.
+func (ap AddressFamilyAndProtocol) IsDatagram() bool {
+ return ap&0x0F == 0x02
+}
+
+// IsUnspec returns true if the transport protocol or address family is unspecified, false otherwise.
+func (ap AddressFamilyAndProtocol) IsUnspec() bool {
+ return (ap&0xF0 == 0x00) || (ap&0x0F == 0x00)
+}
+
+func (ap AddressFamilyAndProtocol) toByte() byte {
+ if ap.IsIPv4() && ap.IsStream() {
+ return byte(TCPv4)
+ } else if ap.IsIPv4() && ap.IsDatagram() {
+ return byte(UDPv4)
+ } else if ap.IsIPv6() && ap.IsStream() {
+ return byte(TCPv6)
+ } else if ap.IsIPv6() && ap.IsDatagram() {
+ return byte(UDPv6)
+ } else if ap.IsUnix() && ap.IsStream() {
+ return byte(UnixStream)
+ } else if ap.IsUnix() && ap.IsDatagram() {
+ return byte(UnixDatagram)
+ }
+
+ return byte(UNSPEC)
+}
diff --git a/vendor/github.com/pires/go-proxyproto/header.go b/vendor/github.com/pires/go-proxyproto/header.go
new file mode 100644
index 00000000000..81ebeb387eb
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/header.go
@@ -0,0 +1,280 @@
+// Package proxyproto implements Proxy Protocol (v1 and v2) parser and writer, as per specification:
+// https://www.haproxy.org/download/2.3/doc/proxy-protocol.txt
+package proxyproto
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "net"
+ "time"
+)
+
+var (
+ // Protocol
+ SIGV1 = []byte{'\x50', '\x52', '\x4F', '\x58', '\x59'}
+ SIGV2 = []byte{'\x0D', '\x0A', '\x0D', '\x0A', '\x00', '\x0D', '\x0A', '\x51', '\x55', '\x49', '\x54', '\x0A'}
+
+ ErrCantReadVersion1Header = errors.New("proxyproto: can't read version 1 header")
+ ErrVersion1HeaderTooLong = errors.New("proxyproto: version 1 header must be 107 bytes or less")
+ ErrLineMustEndWithCrlf = errors.New("proxyproto: version 1 header is invalid, must end with \\r\\n")
+ ErrCantReadProtocolVersionAndCommand = errors.New("proxyproto: can't read proxy protocol version and command")
+ ErrCantReadAddressFamilyAndProtocol = errors.New("proxyproto: can't read address family or protocol")
+ ErrCantReadLength = errors.New("proxyproto: can't read length")
+ ErrCantResolveSourceUnixAddress = errors.New("proxyproto: can't resolve source Unix address")
+ ErrCantResolveDestinationUnixAddress = errors.New("proxyproto: can't resolve destination Unix address")
+ ErrNoProxyProtocol = errors.New("proxyproto: proxy protocol signature not present")
+ ErrUnknownProxyProtocolVersion = errors.New("proxyproto: unknown proxy protocol version")
+ ErrUnsupportedProtocolVersionAndCommand = errors.New("proxyproto: unsupported proxy protocol version and command")
+ ErrUnsupportedAddressFamilyAndProtocol = errors.New("proxyproto: unsupported address family and protocol")
+ ErrInvalidLength = errors.New("proxyproto: invalid length")
+ ErrInvalidAddress = errors.New("proxyproto: invalid address")
+ ErrInvalidPortNumber = errors.New("proxyproto: invalid port number")
+ ErrSuperfluousProxyHeader = errors.New("proxyproto: upstream connection sent PROXY header but isn't allowed to send one")
+)
+
+// Header is the placeholder for proxy protocol header.
+type Header struct {
+ Version byte
+ Command ProtocolVersionAndCommand
+ TransportProtocol AddressFamilyAndProtocol
+ SourceAddr net.Addr
+ DestinationAddr net.Addr
+ rawTLVs []byte
+}
+
+// HeaderProxyFromAddrs creates a new PROXY header from a source and a
+// destination address. If version is zero, the latest protocol version is
+// used.
+//
+// The header is filled on a best-effort basis: if hints cannot be inferred
+// from the provided addresses, the header will be left unspecified.
+func HeaderProxyFromAddrs(version byte, sourceAddr, destAddr net.Addr) *Header {
+ if version < 1 || version > 2 {
+ version = 2
+ }
+ h := &Header{
+ Version: version,
+ Command: LOCAL,
+ TransportProtocol: UNSPEC,
+ }
+ switch sourceAddr := sourceAddr.(type) {
+ case *net.TCPAddr:
+ if _, ok := destAddr.(*net.TCPAddr); !ok {
+ break
+ }
+ if len(sourceAddr.IP.To4()) == net.IPv4len {
+ h.TransportProtocol = TCPv4
+ } else if len(sourceAddr.IP) == net.IPv6len {
+ h.TransportProtocol = TCPv6
+ }
+ case *net.UDPAddr:
+ if _, ok := destAddr.(*net.UDPAddr); !ok {
+ break
+ }
+ if len(sourceAddr.IP.To4()) == net.IPv4len {
+ h.TransportProtocol = UDPv4
+ } else if len(sourceAddr.IP) == net.IPv6len {
+ h.TransportProtocol = UDPv6
+ }
+ case *net.UnixAddr:
+ if _, ok := destAddr.(*net.UnixAddr); !ok {
+ break
+ }
+ switch sourceAddr.Net {
+ case "unix":
+ h.TransportProtocol = UnixStream
+ case "unixgram":
+ h.TransportProtocol = UnixDatagram
+ }
+ }
+ if h.TransportProtocol != UNSPEC {
+ h.Command = PROXY
+ h.SourceAddr = sourceAddr
+ h.DestinationAddr = destAddr
+ }
+ return h
+}
+
+func (header *Header) TCPAddrs() (sourceAddr, destAddr *net.TCPAddr, ok bool) {
+ if !header.TransportProtocol.IsStream() {
+ return nil, nil, false
+ }
+ sourceAddr, sourceOK := header.SourceAddr.(*net.TCPAddr)
+ destAddr, destOK := header.DestinationAddr.(*net.TCPAddr)
+ return sourceAddr, destAddr, sourceOK && destOK
+}
+
+func (header *Header) UDPAddrs() (sourceAddr, destAddr *net.UDPAddr, ok bool) {
+ if !header.TransportProtocol.IsDatagram() {
+ return nil, nil, false
+ }
+ sourceAddr, sourceOK := header.SourceAddr.(*net.UDPAddr)
+ destAddr, destOK := header.DestinationAddr.(*net.UDPAddr)
+ return sourceAddr, destAddr, sourceOK && destOK
+}
+
+func (header *Header) UnixAddrs() (sourceAddr, destAddr *net.UnixAddr, ok bool) {
+ if !header.TransportProtocol.IsUnix() {
+ return nil, nil, false
+ }
+ sourceAddr, sourceOK := header.SourceAddr.(*net.UnixAddr)
+ destAddr, destOK := header.DestinationAddr.(*net.UnixAddr)
+ return sourceAddr, destAddr, sourceOK && destOK
+}
+
+func (header *Header) IPs() (sourceIP, destIP net.IP, ok bool) {
+ if sourceAddr, destAddr, ok := header.TCPAddrs(); ok {
+ return sourceAddr.IP, destAddr.IP, true
+ } else if sourceAddr, destAddr, ok := header.UDPAddrs(); ok {
+ return sourceAddr.IP, destAddr.IP, true
+ } else {
+ return nil, nil, false
+ }
+}
+
+func (header *Header) Ports() (sourcePort, destPort int, ok bool) {
+ if sourceAddr, destAddr, ok := header.TCPAddrs(); ok {
+ return sourceAddr.Port, destAddr.Port, true
+ } else if sourceAddr, destAddr, ok := header.UDPAddrs(); ok {
+ return sourceAddr.Port, destAddr.Port, true
+ } else {
+ return 0, 0, false
+ }
+}
+
+// EqualTo returns true if headers are equivalent, false otherwise.
+// Deprecated: use EqualsTo instead. This method will eventually be removed.
+func (header *Header) EqualTo(otherHeader *Header) bool {
+ return header.EqualsTo(otherHeader)
+}
+
+// EqualsTo returns true if headers are equivalent, false otherwise.
+func (header *Header) EqualsTo(otherHeader *Header) bool {
+ if otherHeader == nil {
+ return false
+ }
+ // TLVs only exist for version 2
+ if header.Version == 2 && !bytes.Equal(header.rawTLVs, otherHeader.rawTLVs) {
+ return false
+ }
+ if header.Version != otherHeader.Version || header.Command != otherHeader.Command || header.TransportProtocol != otherHeader.TransportProtocol {
+ return false
+ }
+ // Return early for header with LOCAL command, which contains no address information
+ if header.Command == LOCAL {
+ return true
+ }
+ return header.SourceAddr.String() == otherHeader.SourceAddr.String() &&
+ header.DestinationAddr.String() == otherHeader.DestinationAddr.String()
+}
+
+// WriteTo renders the proxy protocol header in its wire format and writes it to an io.Writer.
+func (header *Header) WriteTo(w io.Writer) (int64, error) {
+ buf, err := header.Format()
+ if err != nil {
+ return 0, err
+ }
+
+ return bytes.NewBuffer(buf).WriteTo(w)
+}
+
+// Format renders the proxy protocol header in the format used on the wire.
+func (header *Header) Format() ([]byte, error) {
+ switch header.Version {
+ case 1:
+ return header.formatVersion1()
+ case 2:
+ return header.formatVersion2()
+ default:
+ return nil, ErrUnknownProxyProtocolVersion
+ }
+}
+
+// TLVs returns the TLVs stored in this header, if they exist. TLVs are optional for v2 of the protocol.
+func (header *Header) TLVs() ([]TLV, error) {
+ return SplitTLVs(header.rawTLVs)
+}
+
+// SetTLVs sets the TLVs stored in this header. This method replaces any
+// previous TLV.
+func (header *Header) SetTLVs(tlvs []TLV) error {
+ raw, err := JoinTLVs(tlvs)
+ if err != nil {
+ return err
+ }
+ header.rawTLVs = raw
+ return nil
+}
+
+// Read identifies the proxy protocol version and reads the remainder of
+// the header accordingly.
+//
+// If the proxy protocol header signature is not present, the reader's buffer remains untouched
+// and is safe to read from outside of this code.
+//
+// If the proxy protocol header signature is present but an error is raised while processing
+// the remaining header, assume the reader's buffer is in a corrupt state.
+// Also, this operation blocks until enough bytes are available for peeking.
+func Read(reader *bufio.Reader) (*Header, error) {
+ // In order to improve speed for small non-PROXYed packets, take a peek at the first byte alone.
+ b1, err := reader.Peek(1)
+ if err != nil {
+ if err == io.EOF {
+ return nil, ErrNoProxyProtocol
+ }
+ return nil, err
+ }
+
+ if bytes.Equal(b1[:1], SIGV1[:1]) || bytes.Equal(b1[:1], SIGV2[:1]) {
+ signature, err := reader.Peek(5)
+ if err != nil {
+ if err == io.EOF {
+ return nil, ErrNoProxyProtocol
+ }
+ return nil, err
+ }
+ if bytes.Equal(signature[:5], SIGV1) {
+ return parseVersion1(reader)
+ }
+
+ signature, err = reader.Peek(12)
+ if err != nil {
+ if err == io.EOF {
+ return nil, ErrNoProxyProtocol
+ }
+ return nil, err
+ }
+ if bytes.Equal(signature[:12], SIGV2) {
+ return parseVersion2(reader)
+ }
+ }
+
+ return nil, ErrNoProxyProtocol
+}
+
+// ReadTimeout acts as Read but takes a timeout. If that timeout is reached, it's assumed
+// there's no proxy protocol header.
+func ReadTimeout(reader *bufio.Reader, timeout time.Duration) (*Header, error) {
+ type header struct {
+ h *Header
+ e error
+ }
+ read := make(chan *header, 1)
+
+ go func() {
+ h := &header{}
+ h.h, h.e = Read(reader)
+ read <- h
+ }()
+
+ timer := time.NewTimer(timeout)
+ select {
+ case result := <-read:
+ timer.Stop()
+ return result.h, result.e
+ case <-timer.C:
+ return nil, ErrNoProxyProtocol
+ }
+}
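+
+// exampleReadHeader is an illustrative sketch (a hypothetical helper, not part
+// of this package's API): it reads an optional PROXY header from an accepted
+// connection, giving the peer at most one second to send it.
+func exampleReadHeader(conn net.Conn) (*Header, error) {
+	br := bufio.NewReader(conn)
+	// ReadTimeout returns ErrNoProxyProtocol both when the signature is absent
+	// and when the timeout expires before enough bytes could be peeked.
+	h, err := ReadTimeout(br, time.Second)
+	// Note: application data must now be read from br, not conn, because the
+	// bufio.Reader may already hold buffered bytes beyond the header.
+	return h, err
+}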
diff --git a/vendor/github.com/pires/go-proxyproto/policy.go b/vendor/github.com/pires/go-proxyproto/policy.go
new file mode 100644
index 00000000000..6d505be4c80
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/policy.go
@@ -0,0 +1,172 @@
+package proxyproto
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// PolicyFunc can be used to decide whether to trust the PROXY info from
+// upstream. If set, the connecting address is passed in as an argument.
+//
+// See below for the different policies.
+//
+// In case an error is returned the connection is denied.
+type PolicyFunc func(upstream net.Addr) (Policy, error)
+
+// Policy defines how a connection with a PROXY header address is treated.
+type Policy int
+
+const (
+ // USE address from PROXY header
+ USE Policy = iota
+ // IGNORE address from PROXY header, but accept connection
+ IGNORE
+ // REJECT connection when PROXY header is sent
+ // Note: even though the first read on the connection returns an error if
+ // a PROXY header is present, subsequent reads do not. It is the task of
+ // the code using the connection to handle that case properly.
+ REJECT
+ // REQUIRE connection to send PROXY header, reject if not present
+ // Note: even though the first read on the connection returns an error if
+ // a PROXY header is not present, subsequent reads do not. It is the task
+ // of the code using the connection to handle that case properly.
+ REQUIRE
+ // SKIP accepts a connection without requiring the PROXY header
+ // Note: an example usage can be found in the SkipProxyHeaderForCIDR
+ // function.
+ SKIP
+)
+
+// SkipProxyHeaderForCIDR returns a PolicyFunc which can be used to accept a
+// connection from a skipHeaderCIDR without requiring a PROXY header, e.g.
+// Kubernetes pods local traffic. The def is a policy to use when an upstream
+// address doesn't match the skipHeaderCIDR.
+func SkipProxyHeaderForCIDR(skipHeaderCIDR *net.IPNet, def Policy) PolicyFunc {
+ return func(upstream net.Addr) (Policy, error) {
+ ip, err := ipFromAddr(upstream)
+ if err != nil {
+ return def, err
+ }
+
+ if skipHeaderCIDR != nil && skipHeaderCIDR.Contains(ip) {
+ return SKIP, nil
+ }
+
+ return def, nil
+ }
+}
+
+// WithPolicy adds given policy to a connection when passed as option to NewConn()
+func WithPolicy(p Policy) func(*Conn) {
+ return func(c *Conn) {
+ c.ProxyHeaderPolicy = p
+ }
+}
+
+// LaxWhiteListPolicy returns a PolicyFunc which decides whether the
+// upstream IP is allowed to send a proxy header based on a list of allowed
+// IP addresses and IP ranges. In case the upstream IP is not in the list, the proxy
+// header will be ignored. If one of the provided IP addresses or IP ranges
+// is invalid it will return an error instead of a PolicyFunc.
+func LaxWhiteListPolicy(allowed []string) (PolicyFunc, error) {
+ allowFrom, err := parse(allowed)
+ if err != nil {
+ return nil, err
+ }
+
+ return whitelistPolicy(allowFrom, IGNORE), nil
+}
+
+// MustLaxWhiteListPolicy returns a LaxWhiteListPolicy but will panic if one
+// of the provided IP addresses or IP ranges is invalid.
+func MustLaxWhiteListPolicy(allowed []string) PolicyFunc {
+ pfunc, err := LaxWhiteListPolicy(allowed)
+ if err != nil {
+ panic(err)
+ }
+
+ return pfunc
+}
+
+// StrictWhiteListPolicy returns a PolicyFunc which decides whether the
+// upstream IP is allowed to send a proxy header based on a list of allowed
+// IP addresses and IP ranges. In case the upstream IP is not in the list, reading
+// on the connection will be refused on the first read. Please note: subsequent
+// reads do not error. It is the task of the code using the connection to
+// handle that case properly. If one of the provided IP addresses or IP
+// ranges is invalid it will return an error instead of a PolicyFunc.
+func StrictWhiteListPolicy(allowed []string) (PolicyFunc, error) {
+ allowFrom, err := parse(allowed)
+ if err != nil {
+ return nil, err
+ }
+
+ return whitelistPolicy(allowFrom, REJECT), nil
+}
+
+// MustStrictWhiteListPolicy returns a StrictWhiteListPolicy but will panic
+// if one of the provided IP addresses or IP ranges is invalid.
+func MustStrictWhiteListPolicy(allowed []string) PolicyFunc {
+ pfunc, err := StrictWhiteListPolicy(allowed)
+ if err != nil {
+ panic(err)
+ }
+
+ return pfunc
+}
+
+func whitelistPolicy(allowed []func(net.IP) bool, def Policy) PolicyFunc {
+ return func(upstream net.Addr) (Policy, error) {
+ upstreamIP, err := ipFromAddr(upstream)
+ if err != nil {
+ // something is wrong with the source IP, better reject the connection
+ return REJECT, err
+ }
+
+ for _, allowFrom := range allowed {
+ if allowFrom(upstreamIP) {
+ return USE, nil
+ }
+ }
+
+ return def, nil
+ }
+}
+
+func parse(allowed []string) ([]func(net.IP) bool, error) {
+ a := make([]func(net.IP) bool, len(allowed))
+ for i, allowFrom := range allowed {
+ if strings.LastIndex(allowFrom, "/") > 0 {
+ _, ipRange, err := net.ParseCIDR(allowFrom)
+ if err != nil {
+ return nil, fmt.Errorf("proxyproto: given string %q is not a valid IP range: %v", allowFrom, err)
+ }
+
+ a[i] = ipRange.Contains
+ } else {
+ allowed := net.ParseIP(allowFrom)
+ if allowed == nil {
+ return nil, fmt.Errorf("proxyproto: given string %q is not a valid IP address", allowFrom)
+ }
+
+ a[i] = allowed.Equal
+ }
+ }
+
+ return a, nil
+}
+
+func ipFromAddr(upstream net.Addr) (net.IP, error) {
+ upstreamString, _, err := net.SplitHostPort(upstream.String())
+ if err != nil {
+ return nil, err
+ }
+
+ upstreamIP := net.ParseIP(upstreamString)
+ if nil == upstreamIP {
+ return nil, fmt.Errorf("proxyproto: invalid IP address")
+ }
+
+ return upstreamIP, nil
+}
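+
+// exampleListen is an illustrative sketch (a hypothetical helper, not part of
+// this package's API): it wraps a TCP listener so that only upstreams inside
+// 10.0.0.0/8 may send a PROXY header; a header from anyone else causes the
+// first read on that connection to fail. The address and CIDR are examples.
+func exampleListen() (net.Listener, error) {
+	inner, err := net.Listen("tcp", ":8080")
+	if err != nil {
+		return nil, err
+	}
+	policy, err := StrictWhiteListPolicy([]string{"10.0.0.0/8"})
+	if err != nil {
+		return nil, err
+	}
+	return &Listener{Listener: inner, Policy: policy}, nil
+}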
diff --git a/vendor/github.com/pires/go-proxyproto/protocol.go b/vendor/github.com/pires/go-proxyproto/protocol.go
new file mode 100644
index 00000000000..4ce16a2765b
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/protocol.go
@@ -0,0 +1,319 @@
+package proxyproto
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// DefaultReadHeaderTimeout is how long header processing waits for the header to
+// be read from the wire, if Listener.ReadHeaderTimeout is not set.
+// It's kept as a global variable so as to make it easier to find and override,
+// e.g. go build -ldflags -X "github.com/pires/go-proxyproto.DefaultReadHeaderTimeout=1s"
+var DefaultReadHeaderTimeout = 10 * time.Second
+
+// Listener is used to wrap an underlying listener,
+// whose connections may be using the HAProxy Proxy Protocol.
+// If the connection is using the protocol, RemoteAddr() will return
+// the correct client address. ReadHeaderTimeout is applied to all
+// connections in order to prevent blocking operations. If no ReadHeaderTimeout
+// is set, DefaultReadHeaderTimeout (10 seconds) is used. This can be disabled
+// by setting the timeout to < 0.
+type Listener struct {
+ Listener net.Listener
+ Policy PolicyFunc
+ ValidateHeader Validator
+ ReadHeaderTimeout time.Duration
+}
+
+// Conn is used to wrap an underlying connection which
+// may be speaking the Proxy Protocol. If it is, RemoteAddr() will
+// return the address of the client instead of the proxy address. Each connection
+// will have its own readHeaderTimeout and readDeadline set by the Accept() call.
+type Conn struct {
+ readDeadline atomic.Value // time.Time
+ once sync.Once
+ readErr error
+ conn net.Conn
+ Validate Validator
+ bufReader *bufio.Reader
+ header *Header
+ ProxyHeaderPolicy Policy
+ readHeaderTimeout time.Duration
+}
+
+// Validator receives a header and decides whether it is a valid one
+// In case the header is not deemed valid it should return an error.
+type Validator func(*Header) error
+
+// ValidateHeader adds given validator for proxy headers to a connection when passed as option to NewConn()
+func ValidateHeader(v Validator) func(*Conn) {
+ return func(c *Conn) {
+ if v != nil {
+ c.Validate = v
+ }
+ }
+}
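+
+// exampleStreamOnly is an illustrative sketch of a Validator (a hypothetical
+// helper, not part of this package's API): it rejects any PROXY header whose
+// transport is not stream-based, reusing ErrInvalidAddress for brevity. It
+// would be installed via Listener.ValidateHeader or the ValidateHeader option
+// to NewConn.
+func exampleStreamOnly(h *Header) error {
+	if !h.TransportProtocol.IsStream() {
+		return ErrInvalidAddress
+	}
+	return nil
+}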
+
+// Accept waits for and returns the next connection to the listener.
+func (p *Listener) Accept() (net.Conn, error) {
+ // Get the underlying connection
+ conn, err := p.Listener.Accept()
+ if err != nil {
+ return nil, err
+ }
+
+ proxyHeaderPolicy := USE
+ if p.Policy != nil {
+ proxyHeaderPolicy, err = p.Policy(conn.RemoteAddr())
+ if err != nil {
+ // can't decide the policy, we can't accept the connection
+ conn.Close()
+ return nil, err
+ }
+ // Handle a connection as a regular one
+ if proxyHeaderPolicy == SKIP {
+ return conn, nil
+ }
+ }
+
+ newConn := NewConn(
+ conn,
+ WithPolicy(proxyHeaderPolicy),
+ ValidateHeader(p.ValidateHeader),
+ )
+
+ // If the ReadHeaderTimeout for the listener is unset, use the default timeout.
+ if p.ReadHeaderTimeout == 0 {
+ p.ReadHeaderTimeout = DefaultReadHeaderTimeout
+ }
+
+ // Set the readHeaderTimeout of the new conn to the value of the listener
+ newConn.readHeaderTimeout = p.ReadHeaderTimeout
+
+ return newConn, nil
+}
+
+// Close closes the underlying listener.
+func (p *Listener) Close() error {
+ return p.Listener.Close()
+}
+
+// Addr returns the underlying listener's network address.
+func (p *Listener) Addr() net.Addr {
+ return p.Listener.Addr()
+}
+
+// NewConn is used to wrap a net.Conn that may be speaking
+// the proxy protocol into a proxyproto.Conn
+func NewConn(conn net.Conn, opts ...func(*Conn)) *Conn {
+ pConn := &Conn{
+ bufReader: bufio.NewReader(conn),
+ conn: conn,
+ }
+
+ for _, opt := range opts {
+ opt(pConn)
+ }
+
+ return pConn
+}
+
+// Read checks for the proxy protocol header on the first call. If there is
+// an error parsing the header, it is returned from this and all subsequent
+// calls to Read.
+func (p *Conn) Read(b []byte) (int, error) {
+ p.once.Do(func() {
+ p.readErr = p.readHeader()
+ })
+ if p.readErr != nil {
+ return 0, p.readErr
+ }
+
+ return p.bufReader.Read(b)
+}
+
+// Write wraps original conn.Write
+func (p *Conn) Write(b []byte) (int, error) {
+ return p.conn.Write(b)
+}
+
+// Close wraps original conn.Close
+func (p *Conn) Close() error {
+ return p.conn.Close()
+}
+
+// ProxyHeader returns the proxy protocol header, if any. If an error occurs
+// while reading the proxy header, nil is returned.
+func (p *Conn) ProxyHeader() *Header {
+ p.once.Do(func() { p.readErr = p.readHeader() })
+ return p.header
+}
+
+// LocalAddr returns the address of the server if the proxy
+// protocol is being used, otherwise just returns the address of
+// the socket server. In case an error happens on reading the
+// proxy header the original LocalAddr is returned, not the one
+// from the proxy header even if the proxy header itself is
+// syntactically correct.
+func (p *Conn) LocalAddr() net.Addr {
+ p.once.Do(func() { p.readErr = p.readHeader() })
+ if p.header == nil || p.header.Command.IsLocal() || p.readErr != nil {
+ return p.conn.LocalAddr()
+ }
+
+ return p.header.DestinationAddr
+}
+
+// RemoteAddr returns the address of the client if the proxy
+// protocol is being used, otherwise just returns the address of
+// the socket peer. In case an error happens on reading the
+// proxy header the original RemoteAddr is returned, not the one
+// from the proxy header even if the proxy header itself is
+// syntactically correct.
+func (p *Conn) RemoteAddr() net.Addr {
+ p.once.Do(func() { p.readErr = p.readHeader() })
+ if p.header == nil || p.header.Command.IsLocal() || p.readErr != nil {
+ return p.conn.RemoteAddr()
+ }
+
+ return p.header.SourceAddr
+}
+
+// Raw returns the underlying connection which can be cast to
+// a concrete type, allowing access to specialized functions.
+//
+// Use this ONLY if you know exactly what you are doing.
+func (p *Conn) Raw() net.Conn {
+ return p.conn
+}
+
+// TCPConn returns the underlying TCP connection,
+// allowing access to specialized functions.
+//
+// Use this ONLY if you know exactly what you are doing.
+func (p *Conn) TCPConn() (conn *net.TCPConn, ok bool) {
+ conn, ok = p.conn.(*net.TCPConn)
+ return
+}
+
+// UnixConn returns the underlying Unix socket connection,
+// allowing access to specialized functions.
+//
+// Use this ONLY if you know exactly what you are doing.
+func (p *Conn) UnixConn() (conn *net.UnixConn, ok bool) {
+ conn, ok = p.conn.(*net.UnixConn)
+ return
+}
+
+// UDPConn returns the underlying UDP connection,
+// allowing access to specialized functions.
+//
+// Use this ONLY if you know exactly what you are doing.
+func (p *Conn) UDPConn() (conn *net.UDPConn, ok bool) {
+ conn, ok = p.conn.(*net.UDPConn)
+ return
+}
+
+// SetDeadline wraps original conn.SetDeadline
+func (p *Conn) SetDeadline(t time.Time) error {
+ p.readDeadline.Store(t)
+ return p.conn.SetDeadline(t)
+}
+
+// SetReadDeadline wraps original conn.SetReadDeadline
+func (p *Conn) SetReadDeadline(t time.Time) error {
+ // Set a local var that tells us the desired deadline. This is
+ // needed in order to reset the read deadline to the one that is
+ // desired by the user, rather than an empty deadline.
+ p.readDeadline.Store(t)
+ return p.conn.SetReadDeadline(t)
+}
+
+// SetWriteDeadline wraps original conn.SetWriteDeadline
+func (p *Conn) SetWriteDeadline(t time.Time) error {
+ return p.conn.SetWriteDeadline(t)
+}
+
+func (p *Conn) readHeader() error {
+ // If the connection's readHeaderTimeout is more than 0,
+ // push our deadline back to now plus the timeout. This should only
+ // run on the connection, as we don't want to override the previous
+ // read deadline the user may have used.
+ if p.readHeaderTimeout > 0 {
+ if err := p.conn.SetReadDeadline(time.Now().Add(p.readHeaderTimeout)); err != nil {
+ return err
+ }
+ }
+
+ header, err := Read(p.bufReader)
+
+ // If the connection's readHeaderTimeout is more than 0, undo the change to the
+ // deadline that we made above. Because we retain the readDeadline as part of our
+ // SetReadDeadline override, we know the user's desired deadline so we use that.
+ // Therefore, we check whether the error is a net.Timeout and if it is, we decide
+ // the proxy proto does not exist and set the error accordingly.
+ if p.readHeaderTimeout > 0 {
+ t := p.readDeadline.Load()
+ if t == nil {
+ t = time.Time{}
+ }
+ if err := p.conn.SetReadDeadline(t.(time.Time)); err != nil {
+ return err
+ }
+ if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+ err = ErrNoProxyProtocol
+ }
+ }
+
+	// For the purpose of this wrapper (shamefully stolen from armon/go-proxyproto),
+	// let's act as if there was no error when the PROXY protocol is not present.
+ if err == ErrNoProxyProtocol {
+ // but not if it is required that the connection has one
+ if p.ProxyHeaderPolicy == REQUIRE {
+ return err
+ }
+
+ return nil
+ }
+
+ // proxy protocol header was found
+ if err == nil && header != nil {
+ switch p.ProxyHeaderPolicy {
+ case REJECT:
+ // this connection is not allowed to send one
+ return ErrSuperfluousProxyHeader
+ case USE, REQUIRE:
+ if p.Validate != nil {
+ err = p.Validate(header)
+ if err != nil {
+ return err
+ }
+ }
+
+ p.header = header
+ }
+ }
+
+ return err
+}
+
+// ReadFrom implements the io.ReaderFrom interface.
+func (p *Conn) ReadFrom(r io.Reader) (int64, error) {
+ if rf, ok := p.conn.(io.ReaderFrom); ok {
+ return rf.ReadFrom(r)
+ }
+ return io.Copy(p.conn, r)
+}
+
+// WriteTo implements io.WriterTo
+func (p *Conn) WriteTo(w io.Writer) (int64, error) {
+ p.once.Do(func() { p.readErr = p.readHeader() })
+ if p.readErr != nil {
+ return 0, p.readErr
+ }
+ return p.bufReader.WriteTo(w)
+}
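+
+// exampleServe is an illustrative sketch of the accept loop (a hypothetical
+// helper, not part of this package's API). RemoteAddr on the wrapped
+// connection reports the client address carried in the PROXY header, falling
+// back to the socket peer when no header was sent.
+func exampleServe(l *Listener, handle func(net.Conn)) error {
+	for {
+		conn, err := l.Accept()
+		if err != nil {
+			return err
+		}
+		// The first Read (or RemoteAddr call) on conn consumes the PROXY
+		// header, if present, before any application bytes are returned.
+		go handle(conn)
+	}
+}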
diff --git a/vendor/github.com/pires/go-proxyproto/tlv.go b/vendor/github.com/pires/go-proxyproto/tlv.go
new file mode 100644
index 00000000000..7cc2fb376ed
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/tlv.go
@@ -0,0 +1,132 @@
+// Type-Length-Value splitting and parsing for proxy protocol V2.
+// See spec https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt sections 2.2 to 2.7.
+
+package proxyproto
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+)
+
+const (
+ // Section 2.2
+ PP2_TYPE_ALPN PP2Type = 0x01
+ PP2_TYPE_AUTHORITY PP2Type = 0x02
+ PP2_TYPE_CRC32C PP2Type = 0x03
+ PP2_TYPE_NOOP PP2Type = 0x04
+ PP2_TYPE_UNIQUE_ID PP2Type = 0x05
+ PP2_TYPE_SSL PP2Type = 0x20
+ PP2_SUBTYPE_SSL_VERSION PP2Type = 0x21
+ PP2_SUBTYPE_SSL_CN PP2Type = 0x22
+ PP2_SUBTYPE_SSL_CIPHER PP2Type = 0x23
+ PP2_SUBTYPE_SSL_SIG_ALG PP2Type = 0x24
+ PP2_SUBTYPE_SSL_KEY_ALG PP2Type = 0x25
+ PP2_TYPE_NETNS PP2Type = 0x30
+
+ // Section 2.2.7, reserved types
+ PP2_TYPE_MIN_CUSTOM PP2Type = 0xE0
+ PP2_TYPE_MAX_CUSTOM PP2Type = 0xEF
+ PP2_TYPE_MIN_EXPERIMENT PP2Type = 0xF0
+ PP2_TYPE_MAX_EXPERIMENT PP2Type = 0xF7
+ PP2_TYPE_MIN_FUTURE PP2Type = 0xF8
+ PP2_TYPE_MAX_FUTURE PP2Type = 0xFF
+)
+
+var (
+ ErrTruncatedTLV = errors.New("proxyproto: truncated TLV")
+ ErrMalformedTLV = errors.New("proxyproto: malformed TLV Value")
+ ErrIncompatibleTLV = errors.New("proxyproto: incompatible TLV type")
+)
+
+// PP2Type is the proxy protocol v2 type
+type PP2Type byte
+
+// TLV is an uninterpreted Type-Length-Value for the V2 protocol, see section 2.2
+type TLV struct {
+ Type PP2Type
+ Value []byte
+}
+
+// SplitTLVs splits the Type-Length-Value vector, returns the vector or an error.
+func SplitTLVs(raw []byte) ([]TLV, error) {
+ var tlvs []TLV
+ for i := 0; i < len(raw); {
+ tlv := TLV{
+ Type: PP2Type(raw[i]),
+ }
+ if len(raw)-i <= 2 {
+ return nil, ErrTruncatedTLV
+ }
+ tlvLen := int(binary.BigEndian.Uint16(raw[i+1 : i+3])) // Max length = 65K
+ i += 3
+ if i+tlvLen > len(raw) {
+ return nil, ErrTruncatedTLV
+ }
+ // Ignore no-op padding
+ if tlv.Type != PP2_TYPE_NOOP {
+ tlv.Value = make([]byte, tlvLen)
+ copy(tlv.Value, raw[i:i+tlvLen])
+ }
+ i += tlvLen
+ tlvs = append(tlvs, tlv)
+ }
+ return tlvs, nil
+}
+
+// JoinTLVs joins multiple Type-Length-Value records.
+func JoinTLVs(tlvs []TLV) ([]byte, error) {
+ var raw []byte
+ for _, tlv := range tlvs {
+ if len(tlv.Value) > math.MaxUint16 {
+ return nil, fmt.Errorf("proxyproto: cannot format TLV %v with length %d", tlv.Type, len(tlv.Value))
+ }
+ var length [2]byte
+ binary.BigEndian.PutUint16(length[:], uint16(len(tlv.Value)))
+ raw = append(raw, byte(tlv.Type))
+ raw = append(raw, length[:]...)
+ raw = append(raw, tlv.Value...)
+ }
+ return raw, nil
+}
+
+// Registered is true if the type is registered in the spec, see section 2.2
+func (p PP2Type) Registered() bool {
+ switch p {
+ case PP2_TYPE_ALPN,
+ PP2_TYPE_AUTHORITY,
+ PP2_TYPE_CRC32C,
+ PP2_TYPE_NOOP,
+ PP2_TYPE_UNIQUE_ID,
+ PP2_TYPE_SSL,
+ PP2_SUBTYPE_SSL_VERSION,
+ PP2_SUBTYPE_SSL_CN,
+ PP2_SUBTYPE_SSL_CIPHER,
+ PP2_SUBTYPE_SSL_SIG_ALG,
+ PP2_SUBTYPE_SSL_KEY_ALG,
+ PP2_TYPE_NETNS:
+ return true
+ }
+ return false
+}
+
+// App is true if the type is reserved for application specific data, see section 2.2.7
+func (p PP2Type) App() bool {
+ return p >= PP2_TYPE_MIN_CUSTOM && p <= PP2_TYPE_MAX_CUSTOM
+}
+
+// Experiment is true if the type is reserved for temporary experimental use by application developers, see section 2.2.7
+func (p PP2Type) Experiment() bool {
+ return p >= PP2_TYPE_MIN_EXPERIMENT && p <= PP2_TYPE_MAX_EXPERIMENT
+}
+
+// Future is true if the type is reserved for future use, see section 2.2.7
+func (p PP2Type) Future() bool {
+ return p >= PP2_TYPE_MIN_FUTURE
+}
+
+// Spec is true if the type is covered by the spec, see section 2.2 and 2.2.7
+func (p PP2Type) Spec() bool {
+ return p.Registered() || p.App() || p.Experiment() || p.Future()
+}
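+
+// exampleUniqueIDTLV is an illustrative sketch (a hypothetical helper, not
+// part of this package's API): it attaches a PP2_TYPE_UNIQUE_ID TLV to a v2
+// header and re-parses it, showing that JoinTLVs and SplitTLVs round-trip.
+func exampleUniqueIDTLV(h *Header, id []byte) ([]TLV, error) {
+	if err := h.SetTLVs([]TLV{{Type: PP2_TYPE_UNIQUE_ID, Value: id}}); err != nil {
+		return nil, err
+	}
+	// TLVs re-parses the raw bytes that SetTLVs stored on the header.
+	return h.TLVs()
+}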
diff --git a/vendor/github.com/pires/go-proxyproto/v1.go b/vendor/github.com/pires/go-proxyproto/v1.go
new file mode 100644
index 00000000000..0d34ba5264e
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/v1.go
@@ -0,0 +1,243 @@
+package proxyproto
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "net"
+ "net/netip"
+ "strconv"
+ "strings"
+)
+
+const (
+ crlf = "\r\n"
+ separator = " "
+)
+
+func initVersion1() *Header {
+ header := new(Header)
+ header.Version = 1
+ // Command doesn't exist in v1
+ header.Command = PROXY
+ return header
+}
+
+func parseVersion1(reader *bufio.Reader) (*Header, error) {
+	// The header cannot be more than 107 bytes long. Per spec:
+ //
+ // (...)
+ // - worst case (optional fields set to 0xff) :
+ // "PROXY UNKNOWN ffff:f...f:ffff ffff:f...f:ffff 65535 65535\r\n"
+ // => 5 + 1 + 7 + 1 + 39 + 1 + 39 + 1 + 5 + 1 + 5 + 2 = 107 chars
+ //
+ // So a 108-byte buffer is always enough to store all the line and a
+ // trailing zero for string processing.
+ //
+ // It must also be CRLF terminated, as above. The header does not otherwise
+ // contain a CR or LF byte.
+ //
+ // ISSUE #69
+ // We can't use Peek here as it will block trying to fill the buffer, which
+ // will never happen if the header is TCP4 or TCP6 (max. 56 and 104 bytes
+ // respectively) and the server is expected to speak first.
+ //
+ // Similarly, we can't use ReadString or ReadBytes as these will keep reading
+ // until the delimiter is found; an abusive client could easily disrupt a
+	// server by sending a large amount of data that does not contain an LF byte.
+ // Another means of attack would be to start connections and simply not send
+ // data after the initial PROXY signature bytes, accumulating a large
+ // number of blocked goroutines on the server. ReadSlice will also block for
+ // a delimiter when the internal buffer does not fill up.
+ //
+ // A plain Read is also problematic since we risk reading past the end of the
+ // header without being able to easily put the excess bytes back into the reader's
+ // buffer (with the current implementation's design).
+ //
+ // So we use a ReadByte loop, which solves the overflow problem and avoids
+ // reading beyond the end of the header. However, we need one more trick to harden
+ // against partial header attacks (slow loris) - per spec:
+ //
+ // (..) The sender must always ensure that the header is sent at once, so that
+ // the transport layer maintains atomicity along the path to the receiver. The
+ // receiver may be tolerant to partial headers or may simply drop the connection
+ // when receiving a partial header. Recommendation is to be tolerant, but
+ // implementation constraints may not always easily permit this.
+ //
+ // We are subject to such implementation constraints. So we return an error if
+ // the header cannot be fully extracted with a single read of the underlying
+ // reader.
+ buf := make([]byte, 0, 107)
+ for {
+ b, err := reader.ReadByte()
+ if err != nil {
+ return nil, fmt.Errorf(ErrCantReadVersion1Header.Error()+": %v", err)
+ }
+ buf = append(buf, b)
+ if b == '\n' {
+ // End of header found
+ break
+ }
+ if len(buf) == 107 {
+ // No delimiter in first 107 bytes
+ return nil, ErrVersion1HeaderTooLong
+ }
+ if reader.Buffered() == 0 {
+ // Header was not buffered in a single read. Since we can't
+ // differentiate between genuine slow writers and DoS agents,
+ // we abort. On healthy networks, this should never happen.
+ return nil, ErrCantReadVersion1Header
+ }
+ }
+
+ // Check for CR before LF.
+ if len(buf) < 2 || buf[len(buf)-2] != '\r' {
+ return nil, ErrLineMustEndWithCrlf
+ }
+
+ // Check full signature.
+ tokens := strings.Split(string(buf[:len(buf)-2]), separator)
+
+ // Expect at least 2 tokens: "PROXY" and the transport protocol.
+ if len(tokens) < 2 {
+ return nil, ErrCantReadAddressFamilyAndProtocol
+ }
+
+ // Read address family and protocol
+ var transportProtocol AddressFamilyAndProtocol
+ switch tokens[1] {
+ case "TCP4":
+ transportProtocol = TCPv4
+ case "TCP6":
+ transportProtocol = TCPv6
+ case "UNKNOWN":
+ transportProtocol = UNSPEC // doesn't exist in v1 but fits UNKNOWN
+ default:
+ return nil, ErrCantReadAddressFamilyAndProtocol
+ }
+
+ // Expect 6 tokens only when UNKNOWN is not present.
+ if transportProtocol != UNSPEC && len(tokens) < 6 {
+ return nil, ErrCantReadAddressFamilyAndProtocol
+ }
+
+ // When a signature is found, allocate a v1 header with Command set to PROXY.
+ // Command doesn't exist in v1 but set it for other parts of this library
+ // to rely on it for determining connection details.
+ header := initVersion1()
+
+ // Transport protocol has been processed already.
+ header.TransportProtocol = transportProtocol
+
+ // When UNKNOWN, set the command to LOCAL and return early
+ if header.TransportProtocol == UNSPEC {
+ header.Command = LOCAL
+ return header, nil
+ }
+
+ // Otherwise, continue to read addresses and ports
+ sourceIP, err := parseV1IPAddress(header.TransportProtocol, tokens[2])
+ if err != nil {
+ return nil, err
+ }
+ destIP, err := parseV1IPAddress(header.TransportProtocol, tokens[3])
+ if err != nil {
+ return nil, err
+ }
+ sourcePort, err := parseV1PortNumber(tokens[4])
+ if err != nil {
+ return nil, err
+ }
+ destPort, err := parseV1PortNumber(tokens[5])
+ if err != nil {
+ return nil, err
+ }
+ header.SourceAddr = &net.TCPAddr{
+ IP: sourceIP,
+ Port: sourcePort,
+ }
+ header.DestinationAddr = &net.TCPAddr{
+ IP: destIP,
+ Port: destPort,
+ }
+
+ return header, nil
+}
+
+func (header *Header) formatVersion1() ([]byte, error) {
+ // As of version 1, only "TCP4" ( \x54 \x43 \x50 \x34 ) for TCP over IPv4,
+ // and "TCP6" ( \x54 \x43 \x50 \x36 ) for TCP over IPv6 are allowed.
+ var proto string
+ switch header.TransportProtocol {
+ case TCPv4:
+ proto = "TCP4"
+ case TCPv6:
+ proto = "TCP6"
+ default:
+ // Unknown connection (short form)
+ return []byte("PROXY UNKNOWN" + crlf), nil
+ }
+
+ sourceAddr, sourceOK := header.SourceAddr.(*net.TCPAddr)
+ destAddr, destOK := header.DestinationAddr.(*net.TCPAddr)
+ if !sourceOK || !destOK {
+ return nil, ErrInvalidAddress
+ }
+
+ sourceIP, destIP := sourceAddr.IP, destAddr.IP
+ switch header.TransportProtocol {
+ case TCPv4:
+ sourceIP = sourceIP.To4()
+ destIP = destIP.To4()
+ case TCPv6:
+ sourceIP = sourceIP.To16()
+ destIP = destIP.To16()
+ }
+ if sourceIP == nil || destIP == nil {
+ return nil, ErrInvalidAddress
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, 108))
+ buf.Write(SIGV1)
+ buf.WriteString(separator)
+ buf.WriteString(proto)
+ buf.WriteString(separator)
+ buf.WriteString(sourceIP.String())
+ buf.WriteString(separator)
+ buf.WriteString(destIP.String())
+ buf.WriteString(separator)
+ buf.WriteString(strconv.Itoa(sourceAddr.Port))
+ buf.WriteString(separator)
+ buf.WriteString(strconv.Itoa(destAddr.Port))
+ buf.WriteString(crlf)
+
+ return buf.Bytes(), nil
+}
+
+func parseV1PortNumber(portStr string) (int, error) {
+ port, err := strconv.Atoi(portStr)
+ if err != nil || port < 0 || port > 65535 {
+ return 0, ErrInvalidPortNumber
+ }
+ return port, nil
+}
+
+func parseV1IPAddress(protocol AddressFamilyAndProtocol, addrStr string) (net.IP, error) {
+ addr, err := netip.ParseAddr(addrStr)
+ if err != nil {
+ return nil, ErrInvalidAddress
+ }
+
+ switch protocol {
+ case TCPv4:
+ if addr.Is4() {
+ return net.IP(addr.AsSlice()), nil
+ }
+ case TCPv6:
+ if addr.Is6() || addr.Is4In6() {
+ return net.IP(addr.AsSlice()), nil
+ }
+ }
+
+ return nil, ErrInvalidAddress
+}
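+
+// exampleFormatV1 is an illustrative sketch (a hypothetical helper, not part
+// of this package's API): for a TCPv4 header it produces a line such as
+// "PROXY TCP4 192.0.2.1 198.51.100.1 56324 443\r\n". The addresses below are
+// documentation addresses, chosen only for the example.
+func exampleFormatV1() ([]byte, error) {
+	h := &Header{
+		Version:           1,
+		Command:           PROXY,
+		TransportProtocol: TCPv4,
+		SourceAddr:        &net.TCPAddr{IP: net.ParseIP("192.0.2.1"), Port: 56324},
+		DestinationAddr:   &net.TCPAddr{IP: net.ParseIP("198.51.100.1"), Port: 443},
+	}
+	return h.Format()
+}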
diff --git a/vendor/github.com/pires/go-proxyproto/v2.go b/vendor/github.com/pires/go-proxyproto/v2.go
new file mode 100644
index 00000000000..74bf3f07714
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/v2.go
@@ -0,0 +1,285 @@
+package proxyproto
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "net"
+)
+
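+// The fixed address-block lengths below follow from the wire layout of the
+// _addr4, _addr6 and _addrUnix structs: 4+4 address bytes plus 2+2 port bytes
+// (12) for IPv4, 16+16+2+2 (36) for IPv6, and 108+108 path bytes (216) for
+// Unix sockets.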
+var (
+ lengthUnspec = uint16(0)
+ lengthV4 = uint16(12)
+ lengthV6 = uint16(36)
+ lengthUnix = uint16(216)
+ lengthUnspecBytes = func() []byte {
+ a := make([]byte, 2)
+ binary.BigEndian.PutUint16(a, lengthUnspec)
+ return a
+ }()
+ lengthV4Bytes = func() []byte {
+ a := make([]byte, 2)
+ binary.BigEndian.PutUint16(a, lengthV4)
+ return a
+ }()
+ lengthV6Bytes = func() []byte {
+ a := make([]byte, 2)
+ binary.BigEndian.PutUint16(a, lengthV6)
+ return a
+ }()
+ lengthUnixBytes = func() []byte {
+ a := make([]byte, 2)
+ binary.BigEndian.PutUint16(a, lengthUnix)
+ return a
+ }()
+ errUint16Overflow = errors.New("proxyproto: uint16 overflow")
+)
+
+type _ports struct {
+ SrcPort uint16
+ DstPort uint16
+}
+
+type _addr4 struct {
+ Src [4]byte
+ Dst [4]byte
+ SrcPort uint16
+ DstPort uint16
+}
+
+type _addr6 struct {
+ Src [16]byte
+ Dst [16]byte
+ _ports
+}
+
+type _addrUnix struct {
+ Src [108]byte
+ Dst [108]byte
+}
+
+func parseVersion2(reader *bufio.Reader) (header *Header, err error) {
+ // Skip first 12 bytes (signature)
+ for i := 0; i < 12; i++ {
+ if _, err = reader.ReadByte(); err != nil {
+ return nil, ErrCantReadProtocolVersionAndCommand
+ }
+ }
+
+ header = new(Header)
+ header.Version = 2
+
+ // Read the 13th byte, protocol version and command
+ b13, err := reader.ReadByte()
+ if err != nil {
+ return nil, ErrCantReadProtocolVersionAndCommand
+ }
+ header.Command = ProtocolVersionAndCommand(b13)
+ if _, ok := supportedCommand[header.Command]; !ok {
+ return nil, ErrUnsupportedProtocolVersionAndCommand
+ }
+
+ // Read the 14th byte, address family and protocol
+ b14, err := reader.ReadByte()
+ if err != nil {
+ return nil, ErrCantReadAddressFamilyAndProtocol
+ }
+ header.TransportProtocol = AddressFamilyAndProtocol(b14)
+ // UNSPEC is only supported when LOCAL is set.
+ if header.TransportProtocol == UNSPEC && header.Command != LOCAL {
+ return nil, ErrUnsupportedAddressFamilyAndProtocol
+ }
+
+ // Make sure there are bytes available as specified in length
+ var length uint16
+ if err := binary.Read(io.LimitReader(reader, 2), binary.BigEndian, &length); err != nil {
+ return nil, ErrCantReadLength
+ }
+ if !header.validateLength(length) {
+ return nil, ErrInvalidLength
+ }
+
+	// Return early if the length is zero, which means that
+	// no address information or TLVs are present (UNSPEC).
+ if length == 0 {
+ return header, nil
+ }
+
+ if _, err := reader.Peek(int(length)); err != nil {
+ return nil, ErrInvalidLength
+ }
+
+ // Length-limited reader for payload section
+ payloadReader := io.LimitReader(reader, int64(length)).(*io.LimitedReader)
+
+ // Read addresses and ports for protocols other than UNSPEC.
+	// Ignore address information for UNSPEC, and skip straight to reading TLVs,
+ // since the length is greater than zero.
+ if header.TransportProtocol != UNSPEC {
+ if header.TransportProtocol.IsIPv4() {
+ var addr _addr4
+ if err := binary.Read(payloadReader, binary.BigEndian, &addr); err != nil {
+ return nil, ErrInvalidAddress
+ }
+ header.SourceAddr = newIPAddr(header.TransportProtocol, addr.Src[:], addr.SrcPort)
+ header.DestinationAddr = newIPAddr(header.TransportProtocol, addr.Dst[:], addr.DstPort)
+ } else if header.TransportProtocol.IsIPv6() {
+ var addr _addr6
+ if err := binary.Read(payloadReader, binary.BigEndian, &addr); err != nil {
+ return nil, ErrInvalidAddress
+ }
+ header.SourceAddr = newIPAddr(header.TransportProtocol, addr.Src[:], addr.SrcPort)
+ header.DestinationAddr = newIPAddr(header.TransportProtocol, addr.Dst[:], addr.DstPort)
+ } else if header.TransportProtocol.IsUnix() {
+ var addr _addrUnix
+ if err := binary.Read(payloadReader, binary.BigEndian, &addr); err != nil {
+ return nil, ErrInvalidAddress
+ }
+
+ network := "unix"
+ if header.TransportProtocol.IsDatagram() {
+ network = "unixgram"
+ }
+
+ header.SourceAddr = &net.UnixAddr{
+ Net: network,
+ Name: parseUnixName(addr.Src[:]),
+ }
+ header.DestinationAddr = &net.UnixAddr{
+ Net: network,
+ Name: parseUnixName(addr.Dst[:]),
+ }
+ }
+ }
+
+ // Copy bytes for optional Type-Length-Value vector
+ header.rawTLVs = make([]byte, payloadReader.N) // Allocate minimum size slice
+ if _, err = io.ReadFull(payloadReader, header.rawTLVs); err != nil && err != io.EOF {
+ return nil, err
+ }
+
+ return header, nil
+}
+
+func (header *Header) formatVersion2() ([]byte, error) {
+ var buf bytes.Buffer
+ buf.Write(SIGV2)
+ buf.WriteByte(header.Command.toByte())
+ buf.WriteByte(header.TransportProtocol.toByte())
+ if header.TransportProtocol.IsUnspec() {
+ // For UNSPEC, write no addresses and ports but only TLVs if they are present
+ hdrLen, err := addTLVLen(lengthUnspecBytes, len(header.rawTLVs))
+ if err != nil {
+ return nil, err
+ }
+ buf.Write(hdrLen)
+ } else {
+ var addrSrc, addrDst []byte
+ if header.TransportProtocol.IsIPv4() {
+ hdrLen, err := addTLVLen(lengthV4Bytes, len(header.rawTLVs))
+ if err != nil {
+ return nil, err
+ }
+ buf.Write(hdrLen)
+ sourceIP, destIP, _ := header.IPs()
+ addrSrc = sourceIP.To4()
+ addrDst = destIP.To4()
+ } else if header.TransportProtocol.IsIPv6() {
+ hdrLen, err := addTLVLen(lengthV6Bytes, len(header.rawTLVs))
+ if err != nil {
+ return nil, err
+ }
+ buf.Write(hdrLen)
+ sourceIP, destIP, _ := header.IPs()
+ addrSrc = sourceIP.To16()
+ addrDst = destIP.To16()
+ } else if header.TransportProtocol.IsUnix() {
+ buf.Write(lengthUnixBytes)
+ sourceAddr, destAddr, ok := header.UnixAddrs()
+ if !ok {
+ return nil, ErrInvalidAddress
+ }
+ addrSrc = formatUnixName(sourceAddr.Name)
+ addrDst = formatUnixName(destAddr.Name)
+ }
+
+ if addrSrc == nil || addrDst == nil {
+ return nil, ErrInvalidAddress
+ }
+ buf.Write(addrSrc)
+ buf.Write(addrDst)
+
+ if sourcePort, destPort, ok := header.Ports(); ok {
+ portBytes := make([]byte, 2)
+
+ binary.BigEndian.PutUint16(portBytes, uint16(sourcePort))
+ buf.Write(portBytes)
+
+ binary.BigEndian.PutUint16(portBytes, uint16(destPort))
+ buf.Write(portBytes)
+ }
+ }
+
+ if len(header.rawTLVs) > 0 {
+ buf.Write(header.rawTLVs)
+ }
+
+ return buf.Bytes(), nil
+}
+
+func (header *Header) validateLength(length uint16) bool {
+ if header.TransportProtocol.IsIPv4() {
+ return length >= lengthV4
+ } else if header.TransportProtocol.IsIPv6() {
+ return length >= lengthV6
+ } else if header.TransportProtocol.IsUnix() {
+ return length >= lengthUnix
+ } else if header.TransportProtocol.IsUnspec() {
+ return length >= lengthUnspec
+ }
+ return false
+}
+
+// addTLVLen adds the length of the TLV to the header length or errors on uint16 overflow.
+func addTLVLen(cur []byte, tlvLen int) ([]byte, error) {
+ if tlvLen == 0 {
+ return cur, nil
+ }
+ curLen := binary.BigEndian.Uint16(cur)
+ newLen := int(curLen) + tlvLen
+ if newLen >= 1<<16 {
+ return nil, errUint16Overflow
+ }
+ a := make([]byte, 2)
+ binary.BigEndian.PutUint16(a, uint16(newLen))
+ return a, nil
+}
+
+func newIPAddr(transport AddressFamilyAndProtocol, ip net.IP, port uint16) net.Addr {
+ if transport.IsStream() {
+ return &net.TCPAddr{IP: ip, Port: int(port)}
+ } else if transport.IsDatagram() {
+ return &net.UDPAddr{IP: ip, Port: int(port)}
+ } else {
+ return nil
+ }
+}
+
+func parseUnixName(b []byte) string {
+ i := bytes.IndexByte(b, 0)
+ if i < 0 {
+ return string(b)
+ }
+ return string(b[:i])
+}
+
+func formatUnixName(name string) []byte {
+ n := int(lengthUnix) / 2
+ if len(name) >= n {
+ return []byte(name[:n])
+ }
+ pad := make([]byte, n-len(name))
+ return append([]byte(name), pad...)
+}
diff --git a/vendor/github.com/pires/go-proxyproto/version_cmd.go b/vendor/github.com/pires/go-proxyproto/version_cmd.go
new file mode 100644
index 00000000000..59f20420882
--- /dev/null
+++ b/vendor/github.com/pires/go-proxyproto/version_cmd.go
@@ -0,0 +1,47 @@
+package proxyproto
+
+// ProtocolVersionAndCommand represents the command in proxy protocol v2.
+// Command doesn't exist in v1 but it should be set since other parts of
+// this library may rely on it for determining connection details.
+type ProtocolVersionAndCommand byte
+
+const (
+ // LOCAL represents the LOCAL command in v2 or UNKNOWN transport in v1,
+ // in which case no address information is expected.
+ LOCAL ProtocolVersionAndCommand = '\x20'
+ // PROXY represents the PROXY command in v2 or transport is not UNKNOWN in v1,
+ // in which case valid local/remote address and port information is expected.
+ PROXY ProtocolVersionAndCommand = '\x21'
+)
+
+var supportedCommand = map[ProtocolVersionAndCommand]bool{
+ LOCAL: true,
+ PROXY: true,
+}
+
+// IsLocal returns true if the command in v2 is LOCAL or the transport in v1 is UNKNOWN,
+// i.e. when no address information is expected, false otherwise.
+func (pvc ProtocolVersionAndCommand) IsLocal() bool {
+ return LOCAL == pvc
+}
+
+// IsProxy returns true if the command in v2 is PROXY or the transport in v1 is not UNKNOWN,
+// i.e. when valid local/remote address and port information is expected, false otherwise.
+func (pvc ProtocolVersionAndCommand) IsProxy() bool {
+ return PROXY == pvc
+}
+
+// IsUnspec returns true if the command is unspecified, false otherwise.
+func (pvc ProtocolVersionAndCommand) IsUnspec() bool {
+ return !(pvc.IsLocal() || pvc.IsProxy())
+}
+
+func (pvc ProtocolVersionAndCommand) toByte() byte {
+ if pvc.IsLocal() {
+ return byte(LOCAL)
+ } else if pvc.IsProxy() {
+ return byte(PROXY)
+ }
+
+ return byte(LOCAL)
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go
index 2e718c2b1f7..9ef764daecb 100644
--- a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go
+++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go
@@ -363,13 +363,11 @@ func Compare(a, b Labels) int {
// Now we know that there is some difference before the end of a and b.
// Go back through the fields and find which field that difference is in.
- firstCharDifferent := i
- for i = 0; ; {
- size, nextI := decodeSize(a.data, i)
- if nextI+size > firstCharDifferent {
- break
- }
+ firstCharDifferent, i := i, 0
+ size, nextI := decodeSize(a.data, i)
+ for nextI+size <= firstCharDifferent {
i = nextI + size
+ size, nextI = decodeSize(a.data, i)
}
// Difference is inside this entry.
aStr, _ := decodeString(a.data, i)
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go
index 38ab0971f40..2c077f86e3b 100644
--- a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go
+++ b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go
@@ -443,7 +443,6 @@ type StringMatcher interface {
// stringMatcherFromRegexp attempts to replace a common regexp with a string matcher.
// It returns nil if the regexp is not supported.
-// For examples, it will replace `.*foo` with `foo.*` and `.*foo.*` with `(?i)foo`.
func stringMatcherFromRegexp(re *syntax.Regexp) StringMatcher {
clearBeginEndText(re)
@@ -599,7 +598,7 @@ func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher {
suffixCaseSensitive: matchesCaseSensitive,
}
- // We found literals in the middle. We can triggered the fast path only if
+ // We found literals in the middle. We can trigger the fast path only if
// the matches are case sensitive because containsStringMatcher doesn't
// support case insensitive.
case matchesCaseSensitive:
@@ -616,7 +615,7 @@ func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher {
// containsStringMatcher matches a string if it contains any of the substrings.
// If left and right are not nil, it's a contains operation where left and right must match.
// If left is nil, it's a hasPrefix operation and right must match.
-// Finally if right is nil it's a hasSuffix operation and left must match.
+// Finally, if right is nil it's a hasSuffix operation and left must match.
type containsStringMatcher struct {
// The matcher that must match the left side. Can be nil.
left StringMatcher
diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go
index da66af2f025..d5840374e77 100644
--- a/vendor/github.com/prometheus/prometheus/promql/functions.go
+++ b/vendor/github.com/prometheus/prometheus/promql/functions.go
@@ -1386,6 +1386,9 @@ func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annot
}
srcLabels[i-3] = src
}
+ if !model.LabelName(dst).IsValid() {
+ panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst))
+ }
val, ws := ev.eval(args[0])
matrix := val.(Matrix)
diff --git a/vendor/github.com/prometheus/prometheus/promql/test.go b/vendor/github.com/prometheus/prometheus/promql/test.go
index 589b1e5b6bb..296b3d3cadc 100644
--- a/vendor/github.com/prometheus/prometheus/promql/test.go
+++ b/vendor/github.com/prometheus/prometheus/promql/test.go
@@ -46,6 +46,7 @@ var (
patSpace = regexp.MustCompile("[\t ]+")
patLoad = regexp.MustCompile(`^load\s+(.+?)$`)
patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
+ patEvalRange = regexp.MustCompile(`^eval(?:_(fail))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`)
)
const (
@@ -72,7 +73,7 @@ func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage {
}
// RunBuiltinTests runs an acceptance test suite against the provided engine.
-func RunBuiltinTests(t *testing.T, engine engineQuerier) {
+func RunBuiltinTests(t *testing.T, engine QueryEngine) {
t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
parser.EnableExperimentalFunctions = true
@@ -89,11 +90,19 @@ func RunBuiltinTests(t *testing.T, engine engineQuerier) {
}
// RunTest parses and runs the test against the provided engine.
-func RunTest(t testutil.T, input string, engine engineQuerier) {
+func RunTest(t testutil.T, input string, engine QueryEngine) {
+ require.NoError(t, runTest(t, input, engine))
+}
+
+func runTest(t testutil.T, input string, engine QueryEngine) error {
test, err := newTest(t, input)
- require.NoError(t, err)
+ // Why do this before checking err? newTest() can create the test storage and then return an error,
+ // and we want to make sure to clean that up to avoid leaking goroutines.
defer func() {
+ if test == nil {
+ return
+ }
if test.storage != nil {
test.storage.Close()
}
@@ -102,11 +111,19 @@ func RunTest(t testutil.T, input string, engine engineQuerier) {
}
}()
+ if err != nil {
+ return err
+ }
+
for _, cmd := range test.cmds {
- // TODO(fabxc): aggregate command errors, yield diffs for result
- // comparison errors.
- require.NoError(t, test.exec(cmd, engine))
+ if err := test.exec(cmd, engine); err != nil {
+ // TODO(fabxc): aggregate command errors, yield diffs for result
+ // comparison errors.
+ return err
+ }
}
+
+ return nil
}
// test is a sequence of read and write commands that are run
@@ -137,11 +154,6 @@ func newTest(t testutil.T, input string) (*test, error) {
//go:embed testdata
var testsFs embed.FS
-type engineQuerier interface {
- NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error)
- NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error)
-}
-
func raise(line int, format string, v ...interface{}) error {
return &parser.ParseErr{
LineOffset: line,
@@ -188,15 +200,26 @@ func parseSeries(defLine string, line int) (labels.Labels, []parser.SequenceValu
}
func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
- if !patEvalInstant.MatchString(lines[i]) {
-	return i, nil, raise(i, "invalid evaluation command. (eval[_fail|_ordered] instant [at <offset:duration>] <query>")
- }
- parts := patEvalInstant.FindStringSubmatch(lines[i])
- var (
- mod = parts[1]
- at = parts[2]
- expr = parts[3]
- )
+ instantParts := patEvalInstant.FindStringSubmatch(lines[i])
+ rangeParts := patEvalRange.FindStringSubmatch(lines[i])
+
+ if instantParts == nil && rangeParts == nil {
+		return i, nil, raise(i, "invalid evaluation command. Must be either 'eval[_fail|_ordered] instant [at <offset:duration>] <query>' or 'eval[_fail] range from <from> to <to> step <step> <query>'")
+ }
+
+ isInstant := instantParts != nil
+
+ var mod string
+ var expr string
+
+ if isInstant {
+ mod = instantParts[1]
+ expr = instantParts[3]
+ } else {
+ mod = rangeParts[1]
+ expr = rangeParts[5]
+ }
+
_, err := parser.ParseExpr(expr)
if err != nil {
parser.EnrichParseError(err, func(parseErr *parser.ParseErr) {
@@ -209,15 +232,54 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
return i, nil, err
}
- offset, err := model.ParseDuration(at)
- if err != nil {
- return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
+ formatErr := func(format string, args ...any) error {
+ combinedArgs := []any{expr, i + 1}
+
+ combinedArgs = append(combinedArgs, args...)
+ return fmt.Errorf("error in eval %s (line %v): "+format, combinedArgs...)
+ }
+
+ var cmd *evalCmd
+
+ if isInstant {
+ at := instantParts[2]
+ offset, err := model.ParseDuration(at)
+ if err != nil {
+ return i, nil, formatErr("invalid timestamp definition %q: %s", at, err)
+ }
+ ts := testStartTime.Add(time.Duration(offset))
+ cmd = newInstantEvalCmd(expr, ts, i+1)
+ } else {
+ from := rangeParts[2]
+ to := rangeParts[3]
+ step := rangeParts[4]
+
+ parsedFrom, err := model.ParseDuration(from)
+ if err != nil {
+ return i, nil, formatErr("invalid start timestamp definition %q: %s", from, err)
+ }
+
+ parsedTo, err := model.ParseDuration(to)
+ if err != nil {
+ return i, nil, formatErr("invalid end timestamp definition %q: %s", to, err)
+ }
+
+ if parsedTo < parsedFrom {
+ return i, nil, formatErr("invalid test definition, end timestamp (%s) is before start timestamp (%s)", to, from)
+ }
+
+ parsedStep, err := model.ParseDuration(step)
+ if err != nil {
+ return i, nil, formatErr("invalid step definition %q: %s", step, err)
+ }
+
+ cmd = newRangeEvalCmd(expr, testStartTime.Add(time.Duration(parsedFrom)), testStartTime.Add(time.Duration(parsedTo)), time.Duration(parsedStep), i+1)
}
- ts := testStartTime.Add(time.Duration(offset))
- cmd := newEvalCmd(expr, ts, i+1)
switch mod {
case "ordered":
+ // Ordered results are not supported for range queries, but the regex for range query commands does not allow
+ // asserting an ordered result, so we don't need to do any error checking here.
cmd.ordered = true
case "fail":
cmd.fail = true
@@ -240,8 +302,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
}
// Currently, we are not expecting any matrices.
- if len(vals) > 1 {
- return i, nil, raise(i, "expecting multiple values in instant evaluation not allowed")
+ if len(vals) > 1 && isInstant {
+ return i, nil, formatErr("expecting multiple values in instant evaluation not allowed")
}
cmd.expectMetric(j, metric, vals...)
}
@@ -375,8 +437,11 @@ func appendSample(a storage.Appender, s Sample, m labels.Labels) error {
type evalCmd struct {
expr string
start time.Time
+ end time.Time
+ step time.Duration
line int
+ isRange bool // if false, instant query
fail, ordered bool
metrics map[uint64]labels.Labels
@@ -392,7 +457,7 @@ func (e entry) String() string {
return fmt.Sprintf("%d: %s", e.pos, e.vals)
}
-func newEvalCmd(expr string, start time.Time, line int) *evalCmd {
+func newInstantEvalCmd(expr string, start time.Time, line int) *evalCmd {
return &evalCmd{
expr: expr,
start: start,
@@ -403,6 +468,20 @@ func newEvalCmd(expr string, start time.Time, line int) *evalCmd {
}
}
+func newRangeEvalCmd(expr string, start, end time.Time, step time.Duration, line int) *evalCmd {
+ return &evalCmd{
+ expr: expr,
+ start: start,
+ end: end,
+ step: step,
+ line: line,
+ isRange: true,
+
+ metrics: map[uint64]labels.Labels{},
+ expected: map[uint64]entry{},
+ }
+}
+
func (ev *evalCmd) String() string {
return "eval"
}
@@ -425,7 +504,77 @@ func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.Sequenc
func (ev *evalCmd) compareResult(result parser.Value) error {
switch val := result.(type) {
case Matrix:
- return errors.New("received range result on instant evaluation")
+ if ev.ordered {
+ return fmt.Errorf("expected ordered result, but query returned a matrix")
+ }
+
+ if err := assertMatrixSorted(val); err != nil {
+ return err
+ }
+
+ seen := map[uint64]bool{}
+ for _, s := range val {
+ hash := s.Metric.Hash()
+ if _, ok := ev.metrics[hash]; !ok {
+ return fmt.Errorf("unexpected metric %s in result", s.Metric)
+ }
+ seen[hash] = true
+ exp := ev.expected[hash]
+
+ var expectedFloats []FPoint
+ var expectedHistograms []HPoint
+
+ for i, e := range exp.vals {
+ ts := ev.start.Add(time.Duration(i) * ev.step)
+
+ if ts.After(ev.end) {
+ return fmt.Errorf("expected %v points for %s, but query time range cannot return this many points", len(exp.vals), ev.metrics[hash])
+ }
+
+ t := ts.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+
+ if e.Histogram != nil {
+ expectedHistograms = append(expectedHistograms, HPoint{T: t, H: e.Histogram})
+ } else if !e.Omitted {
+ expectedFloats = append(expectedFloats, FPoint{T: t, F: e.Value})
+ }
+ }
+
+ if len(expectedFloats) != len(s.Floats) || len(expectedHistograms) != len(s.Histograms) {
+ return fmt.Errorf("expected %v float points and %v histogram points for %s, but got %s", len(expectedFloats), len(expectedHistograms), ev.metrics[hash], formatSeriesResult(s))
+ }
+
+ for i, expected := range expectedFloats {
+ actual := s.Floats[i]
+
+ if expected.T != actual.T {
+ return fmt.Errorf("expected float value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
+ }
+
+ if !almostEqual(actual.F, expected.F, defaultEpsilon) {
+ return fmt.Errorf("expected float value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.F, actual.F, formatSeriesResult(s))
+ }
+ }
+
+ for i, expected := range expectedHistograms {
+ actual := s.Histograms[i]
+
+ if expected.T != actual.T {
+ return fmt.Errorf("expected histogram value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
+ }
+
+ if !actual.H.Equals(expected.H) {
+ return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H, actual.H, formatSeriesResult(s))
+ }
+ }
+
+ }
+
+ for hash := range ev.expected {
+ if !seen[hash] {
+ return fmt.Errorf("expected metric %s not found", ev.metrics[hash])
+ }
+ }
case Vector:
seen := map[uint64]bool{}
@@ -440,7 +589,13 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
}
exp0 := exp.vals[0]
expH := exp0.Histogram
- if (expH == nil) != (v.H == nil) || (expH != nil && !expH.Equals(v.H)) {
+ if expH == nil && v.H != nil {
+ return fmt.Errorf("expected float value %v for %s but got histogram %s", exp0, v.Metric, HistogramTestExpression(v.H))
+ }
+ if expH != nil && v.H == nil {
+ return fmt.Errorf("expected histogram %s for %s but got float value %v", HistogramTestExpression(expH), v.Metric, v.F)
+ }
+ if expH != nil && !expH.Equals(v.H) {
return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H))
}
if !almostEqual(exp0.Value, v.F, defaultEpsilon) {
@@ -477,6 +632,21 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
return nil
}
+func formatSeriesResult(s Series) string {
+ floatPlural := "s"
+ histogramPlural := "s"
+
+ if len(s.Floats) == 1 {
+ floatPlural = ""
+ }
+
+ if len(s.Histograms) == 1 {
+ histogramPlural = ""
+ }
+
+ return fmt.Sprintf("%v float point%s %v and %v histogram point%s %v", len(s.Floats), floatPlural, s.Floats, len(s.Histograms), histogramPlural, s.Histograms)
+}
+
// HistogramTestExpression returns TestExpression() for the given histogram or "" if the histogram is nil.
func HistogramTestExpression(h *histogram.FloatHistogram) string {
if h != nil {
@@ -561,7 +731,7 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCa
}
// exec processes a single step of the test.
-func (t *test) exec(tc testCommand, engine engineQuerier) error {
+func (t *test) exec(tc testCommand, engine QueryEngine) error {
switch cmd := tc.(type) {
case *clearCmd:
t.clear()
@@ -578,78 +748,137 @@ func (t *test) exec(tc testCommand, engine engineQuerier) error {
}
case *evalCmd:
- queries, err := atModifierTestCases(cmd.expr, cmd.start)
+ return t.execEval(cmd, engine)
+
+ default:
+ panic("promql.Test.exec: unknown test command type")
+ }
+ return nil
+}
+
+func (t *test) execEval(cmd *evalCmd, engine QueryEngine) error {
+ if cmd.isRange {
+ return t.execRangeEval(cmd, engine)
+ }
+
+ return t.execInstantEval(cmd, engine)
+}
+
+func (t *test) execRangeEval(cmd *evalCmd, engine QueryEngine) error {
+ q, err := engine.NewRangeQuery(t.context, t.storage, nil, cmd.expr, cmd.start, cmd.end, cmd.step)
+ if err != nil {
+ return err
+ }
+ res := q.Exec(t.context)
+ if res.Err != nil {
+ if cmd.fail {
+ return nil
+ }
+
+ return fmt.Errorf("error evaluating query %q (line %d): %w", cmd.expr, cmd.line, res.Err)
+ }
+ if res.Err == nil && cmd.fail {
+ return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
+ }
+ defer q.Close()
+
+ if err := cmd.compareResult(res.Value); err != nil {
+ return fmt.Errorf("error in %s %s (line %d): %w", cmd, cmd.expr, cmd.line, err)
+ }
+
+ return nil
+}
+
+func (t *test) execInstantEval(cmd *evalCmd, engine QueryEngine) error {
+ queries, err := atModifierTestCases(cmd.expr, cmd.start)
+ if err != nil {
+ return err
+ }
+ queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...)
+ for _, iq := range queries {
+ q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime)
if err != nil {
return err
}
- queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...)
- for _, iq := range queries {
- q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime)
- if err != nil {
- return err
- }
- defer q.Close()
- res := q.Exec(t.context)
- if res.Err != nil {
- if cmd.fail {
- continue
- }
- return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err)
- }
- if res.Err == nil && cmd.fail {
- return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
- }
- err = cmd.compareResult(res.Value)
- if err != nil {
- return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
- }
-
- // Check query returns same result in range mode,
- // by checking against the middle step.
- q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
- if err != nil {
- return err
- }
- rangeRes := q.Exec(t.context)
- if rangeRes.Err != nil {
- return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
- }
- defer q.Close()
- if cmd.ordered {
- // Ordering isn't defined for range queries.
+ defer q.Close()
+ res := q.Exec(t.context)
+ if res.Err != nil {
+ if cmd.fail {
continue
}
- mat := rangeRes.Value.(Matrix)
- vec := make(Vector, 0, len(mat))
- for _, series := range mat {
- // We expect either Floats or Histograms.
- for _, point := range series.Floats {
- if point.T == timeMilliseconds(iq.evalTime) {
- vec = append(vec, Sample{Metric: series.Metric, T: point.T, F: point.F})
- break
- }
- }
- for _, point := range series.Histograms {
- if point.T == timeMilliseconds(iq.evalTime) {
- vec = append(vec, Sample{Metric: series.Metric, T: point.T, H: point.H})
- break
- }
+ return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err)
+ }
+ if res.Err == nil && cmd.fail {
+ return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
+ }
+ err = cmd.compareResult(res.Value)
+ if err != nil {
+ return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
+ }
+
+ // Check query returns same result in range mode,
+ // by checking against the middle step.
+ q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
+ if err != nil {
+ return err
+ }
+ rangeRes := q.Exec(t.context)
+ if rangeRes.Err != nil {
+ return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
+ }
+ defer q.Close()
+ if cmd.ordered {
+ // Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
+ continue
+ }
+ mat := rangeRes.Value.(Matrix)
+ if err := assertMatrixSorted(mat); err != nil {
+ return err
+ }
+
+ vec := make(Vector, 0, len(mat))
+ for _, series := range mat {
+ // We expect either Floats or Histograms.
+ for _, point := range series.Floats {
+ if point.T == timeMilliseconds(iq.evalTime) {
+ vec = append(vec, Sample{Metric: series.Metric, T: point.T, F: point.F})
+ break
}
}
- if _, ok := res.Value.(Scalar); ok {
- err = cmd.compareResult(Scalar{V: vec[0].F})
- } else {
- err = cmd.compareResult(vec)
- }
- if err != nil {
- return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err)
+ for _, point := range series.Histograms {
+ if point.T == timeMilliseconds(iq.evalTime) {
+ vec = append(vec, Sample{Metric: series.Metric, T: point.T, H: point.H})
+ break
+ }
}
-
}
+ if _, ok := res.Value.(Scalar); ok {
+ err = cmd.compareResult(Scalar{V: vec[0].F})
+ } else {
+ err = cmd.compareResult(vec)
+ }
+ if err != nil {
+ return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err)
+ }
+ }
- default:
- panic("promql.Test.exec: unknown test command type")
+ return nil
+}
+
+func assertMatrixSorted(m Matrix) error {
+ if len(m) <= 1 {
+ return nil
}
+
+ for i, s := range m[:len(m)-1] {
+ nextIndex := i + 1
+ nextMetric := m[nextIndex].Metric
+
+ if labels.Compare(s.Metric, nextMetric) > 0 {
+ return fmt.Errorf("matrix results should always be sorted by labels, but matrix is not sorted: series at index %v with labels %s sorts before series at index %v with labels %s", nextIndex, nextMetric, i, s.Metric)
+ }
+ }
+
return nil
}
@@ -704,8 +933,6 @@ func parseNumber(s string) (float64, error) {
// LazyLoader lazily loads samples into storage.
// This is specifically implemented for unit testing of rules.
type LazyLoader struct {
- testutil.T
-
loadCmd *loadCmd
storage storage.Storage
@@ -727,13 +954,15 @@ type LazyLoaderOpts struct {
}
// NewLazyLoader returns an initialized empty LazyLoader.
-func NewLazyLoader(t testutil.T, input string, opts LazyLoaderOpts) (*LazyLoader, error) {
+func NewLazyLoader(input string, opts LazyLoaderOpts) (*LazyLoader, error) {
ll := &LazyLoader{
- T: t,
opts: opts,
}
err := ll.parse(input)
- ll.clear()
+ if err != nil {
+ return nil, err
+ }
+ err = ll.clear()
return ll, err
}
@@ -761,15 +990,20 @@ func (ll *LazyLoader) parse(input string) error {
}
// clear the current test storage of all inserted samples.
-func (ll *LazyLoader) clear() {
+func (ll *LazyLoader) clear() error {
if ll.storage != nil {
- err := ll.storage.Close()
- require.NoError(ll.T, err, "Unexpected error while closing test storage.")
+ if err := ll.storage.Close(); err != nil {
+ return fmt.Errorf("closing test storage: %w", err)
+ }
}
if ll.cancelCtx != nil {
ll.cancelCtx()
}
- ll.storage = teststorage.New(ll)
+ var err error
+ ll.storage, err = teststorage.NewWithError()
+ if err != nil {
+ return err
+ }
opts := EngineOpts{
Logger: nil,
@@ -783,6 +1017,7 @@ func (ll *LazyLoader) clear() {
ll.queryEngine = NewEngine(opts)
ll.context, ll.cancelCtx = context.WithCancel(context.Background())
+ return nil
}
// appendTill appends the defined time series to the storage till the given timestamp (in milliseconds).
@@ -836,8 +1071,7 @@ func (ll *LazyLoader) Storage() storage.Storage {
}
// Close closes resources associated with the LazyLoader.
-func (ll *LazyLoader) Close() {
+func (ll *LazyLoader) Close() error {
ll.cancelCtx()
- err := ll.storage.Close()
- require.NoError(ll.T, err, "Unexpected error while closing test storage.")
+ return ll.storage.Close()
}
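
For orientation, a minimal sketch (not part of this diff; the test-script contents and surrounding function are illustrative assumptions) of how a caller might drive the reworked LazyLoader, which now returns errors instead of requiring a testutil.T:

    ll, err := promql.NewLazyLoader(`
    load 10s
        metric 1+1x10
    `, promql.LazyLoaderOpts{})
    if err != nil {
        return err
    }
    // Close now reports storage-close errors instead of failing a test via require.NoError.
    defer func() { _ = ll.Close() }()
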
diff --git a/vendor/github.com/prometheus/prometheus/rules/group.go b/vendor/github.com/prometheus/prometheus/rules/group.go
index 3a02384aae9..8f6c08c1fa2 100644
--- a/vendor/github.com/prometheus/prometheus/rules/group.go
+++ b/vendor/github.com/prometheus/prometheus/rules/group.go
@@ -566,13 +566,13 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
}
}
if numOutOfOrder > 0 {
- level.Warn(logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "numDropped", numOutOfOrder)
+ level.Warn(logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder)
}
if numTooOld > 0 {
- level.Warn(logger).Log("msg", "Error on ingesting too old result from rule evaluation", "numDropped", numTooOld)
+ level.Warn(logger).Log("msg", "Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld)
}
if numDuplicates > 0 {
- level.Warn(logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "numDropped", numDuplicates)
+ level.Warn(logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates)
}
for metric, lset := range g.seriesInPreviousEval[i] {
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go
index 03197588f63..21d72d46b1c 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/db.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go
@@ -588,9 +588,10 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue
if err := head.Init(maxBlockTime); err != nil {
return nil, fmt.Errorf("read WAL: %w", err)
}
- // Set the wal to nil to disable all wal operations.
+ // Set the wal and the wbl to nil to disable related operations.
// This is mainly to avoid blocking when closing the head.
head.wal = nil
+ head.wbl = nil
}
db.closers = append(db.closers, head)
@@ -1682,7 +1683,7 @@ func BeyondTimeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc
deletable = make(map[ulid.ULID]struct{})
for i, block := range blocks {
- // The difference between the first block and this block is larger than
+ // The difference between the first block and this block is greater than or equal to
// the retention period so any blocks after that are added as deletable.
if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime >= db.opts.RetentionDuration {
for _, b := range blocks[i:] {
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go
index 2027f14c683..3be729533f4 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/head.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go
@@ -665,6 +665,7 @@ func (h *Head) Init(minValidTime int64) error {
refSeries := make(map[chunks.HeadSeriesRef]*memSeries)
snapshotLoaded := false
+ var chunkSnapshotLoadDuration time.Duration
if h.opts.EnableMemorySnapshotOnShutdown {
level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot")
// If there are any WAL files, there should be at least one WAL file with an index that is current or newer
@@ -695,7 +696,8 @@ func (h *Head) Init(minValidTime int64) error {
snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot()
if err == nil {
snapshotLoaded = true
- level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", time.Since(start).String())
+ chunkSnapshotLoadDuration = time.Since(start)
+ level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", chunkSnapshotLoadDuration.String())
}
if err != nil {
snapIdx, snapOffset = -1, 0
@@ -717,6 +719,8 @@ func (h *Head) Init(minValidTime int64) error {
oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
lastMmapRef chunks.ChunkDiskMapperRef
err error
+
+ mmapChunkReplayDuration time.Duration
)
if snapshotLoaded || h.wal != nil {
// If snapshot was not loaded and if there is no WAL, then m-map chunks will be discarded
@@ -740,7 +744,8 @@ func (h *Head) Init(minValidTime int64) error {
return err
}
}
- level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", time.Since(mmapChunkReplayStart).String())
+ mmapChunkReplayDuration = time.Since(mmapChunkReplayStart)
+ level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", mmapChunkReplayDuration.String())
}
if h.wal == nil {
@@ -862,6 +867,8 @@ func (h *Head) Init(minValidTime int64) error {
"checkpoint_replay_duration", checkpointReplayDuration.String(),
"wal_replay_duration", walReplayDuration.String(),
"wbl_replay_duration", wblReplayDuration.String(),
+ "chunk_snapshot_load_duration", chunkSnapshotLoadDuration.String(),
+ "mmap_chunk_replay_duration", mmapChunkReplayDuration.String(),
"total_replay_duration", totalReplayDuration.String(),
)
@@ -1119,11 +1126,11 @@ func (h *Head) SetMinValidTime(minValidTime int64) {
// Truncate removes old data before mint from the head and WAL.
func (h *Head) Truncate(mint int64) (err error) {
- uninitialized := h.isUninitialized()
+ initialized := h.initialized()
if err := h.truncateMemory(mint); err != nil {
return err
}
- if uninitialized {
+ if !initialized {
return nil
}
return h.truncateWAL(mint)
@@ -1145,9 +1152,9 @@ func (h *Head) truncateMemory(mint int64) (err error) {
}
}()
- uninitialized := h.isUninitialized()
+ initialized := h.initialized()
- if h.MinTime() >= mint && !uninitialized {
+ if h.MinTime() >= mint && initialized {
return nil
}
@@ -1158,7 +1165,7 @@ func (h *Head) truncateMemory(mint int64) (err error) {
defer h.memTruncationInProcess.Store(false)
// We wait for pending queries to end that overlap with this truncation.
- if !uninitialized {
+ if initialized {
h.WaitForPendingReadersInTimeRange(h.MinTime(), mint)
}
@@ -1172,7 +1179,7 @@ func (h *Head) truncateMemory(mint int64) (err error) {
// This was an initial call to Truncate after loading blocks on startup.
// We haven't read back the WAL yet, so do not attempt to truncate it.
- if uninitialized {
+ if !initialized {
return nil
}
@@ -1660,9 +1667,9 @@ func (h *Head) MaxOOOTime() int64 {
return h.maxOOOTime.Load()
}
-// isUninitialized returns true if the head does not yet have a MinTime or MaxTime set, false otherwise.
-func (h *Head) isUninitialized() bool {
- return h.MinTime() == math.MaxInt64 || h.MaxTime() == math.MinInt64
+// initialized returns true if the head has a MinTime set, false otherwise.
+func (h *Head) initialized() bool {
+ return h.MinTime() != math.MaxInt64
}
// compactable returns whether the head has a compactable range.
@@ -1670,7 +1677,7 @@ func (h *Head) isUninitialized() bool {
// Else the head has a compactable range when the head time range is 1.5 times the chunk range.
// The 0.5 acts as a buffer of the appendable window.
func (h *Head) compactable() bool {
- if h.isUninitialized() {
+ if !h.initialized() {
return false
}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go
index 054c393e742..8acf93679b9 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go
@@ -138,7 +138,7 @@ func (h *Head) Appender(_ context.Context) storage.Appender {
// The head cache might not have a starting point yet. The init appender
// picks up the first appended timestamp as the base.
- if h.isUninitialized() {
+ if !h.initialized() {
return &initAppender{
head: h,
}
@@ -191,7 +191,7 @@ func (h *Head) appendableMinValidTime() int64 {
// AppendableMinValidTime returns the minimum valid time for samples to be appended to the Head.
// Returns false if Head hasn't been initialized yet and the minimum time isn't known yet.
func (h *Head) AppendableMinValidTime() (int64, bool) {
- if h.isUninitialized() {
+ if !h.initialized() {
return 0, false
}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go
index 4ad1bb23653..a16cd5fc749 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go
@@ -149,22 +149,23 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head
r := NewReader(sgmReader)
var (
- series []record.RefSeries
- samples []record.RefSample
- histogramSamples []record.RefHistogramSample
- tstones []tombstones.Stone
- exemplars []record.RefExemplar
- metadata []record.RefMetadata
- st = labels.NewSymbolTable() // Needed for decoding; labels do not outlive this function.
- dec = record.NewDecoder(st)
- enc record.Encoder
- buf []byte
- recs [][]byte
+ series []record.RefSeries
+ samples []record.RefSample
+ histogramSamples []record.RefHistogramSample
+ floatHistogramSamples []record.RefFloatHistogramSample
+ tstones []tombstones.Stone
+ exemplars []record.RefExemplar
+ metadata []record.RefMetadata
+ st = labels.NewSymbolTable() // Needed for decoding; labels do not outlive this function.
+ dec = record.NewDecoder(st)
+ enc record.Encoder
+ buf []byte
+ recs [][]byte
latestMetadataMap = make(map[chunks.HeadSeriesRef]record.RefMetadata)
)
for r.Next() {
- series, samples, histogramSamples, tstones, exemplars, metadata = series[:0], samples[:0], histogramSamples[:0], tstones[:0], exemplars[:0], metadata[:0]
+ series, samples, histogramSamples, floatHistogramSamples, tstones, exemplars, metadata = series[:0], samples[:0], histogramSamples[:0], floatHistogramSamples[:0], tstones[:0], exemplars[:0], metadata[:0]
// We don't reset the buffer since we batch up multiple records
// before writing them to the checkpoint.
@@ -224,8 +225,26 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head
if len(repl) > 0 {
buf = enc.HistogramSamples(repl, buf)
}
- stats.TotalSamples += len(samples)
- stats.DroppedSamples += len(samples) - len(repl)
+ stats.TotalSamples += len(histogramSamples)
+ stats.DroppedSamples += len(histogramSamples) - len(repl)
+
+ case record.FloatHistogramSamples:
+ floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples)
+ if err != nil {
+ return nil, fmt.Errorf("decode float histogram samples: %w", err)
+ }
+ // Drop irrelevant floatHistogramSamples in place.
+ repl := floatHistogramSamples[:0]
+ for _, fh := range floatHistogramSamples {
+ if fh.T >= mint {
+ repl = append(repl, fh)
+ }
+ }
+ if len(repl) > 0 {
+ buf = enc.FloatHistogramSamples(repl, buf)
+ }
+ stats.TotalSamples += len(floatHistogramSamples)
+ stats.DroppedSamples += len(floatHistogramSamples) - len(repl)
case record.Tombstones:
tstones, err = dec.Tombstones(rec, tstones)
diff --git a/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go b/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go
index 5d95437e99a..7d1f9dda242 100644
--- a/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go
+++ b/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go
@@ -14,6 +14,7 @@
package teststorage
import (
+ "fmt"
"os"
"time"
@@ -30,8 +31,18 @@ import (
// New returns a new TestStorage for testing purposes
// that removes all associated files on closing.
func New(t testutil.T) *TestStorage {
+ stor, err := NewWithError()
+ require.NoError(t, err)
+ return stor
+}
+
+// NewWithError returns a new TestStorage for user-facing tests, which reports
+// errors directly.
+func NewWithError() (*TestStorage, error) {
dir, err := os.MkdirTemp("", "test_storage")
- require.NoError(t, err, "unexpected error while opening test directory")
+ if err != nil {
+ return nil, fmt.Errorf("opening test directory: %w", err)
+ }
// Tests just load data for a series sequentially. Thus we
// need a long appendable window.
@@ -41,13 +52,17 @@ func New(t testutil.T) *TestStorage {
opts.RetentionDuration = 0
opts.EnableNativeHistograms = true
db, err := tsdb.Open(dir, nil, nil, opts, tsdb.NewDBStats())
- require.NoError(t, err, "unexpected error while opening test storage")
+ if err != nil {
+ return nil, fmt.Errorf("opening test storage: %w", err)
+ }
reg := prometheus.NewRegistry()
eMetrics := tsdb.NewExemplarMetrics(reg)
es, err := tsdb.NewCircularExemplarStorage(10, eMetrics)
- require.NoError(t, err, "unexpected error while opening test exemplar storage")
- return &TestStorage{DB: db, exemplarStorage: es, dir: dir}
+ if err != nil {
+ return nil, fmt.Errorf("opening test exemplar storage: %w", err)
+ }
+ return &TestStorage{DB: db, exemplarStorage: es, dir: dir}, nil
}
type TestStorage struct {
diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go
index b56026e45e1..dc22365073a 100644
--- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go
+++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go
@@ -882,6 +882,9 @@ func (api *API) series(r *http.Request) (result apiFuncResult) {
warnings := set.Warnings()
for set.Next() {
+ if err := ctx.Err(); err != nil {
+ return apiFuncResult{nil, returnAPIError(err), warnings, closer}
+ }
metrics = append(metrics, set.At().Labels())
if len(metrics) >= limit {
diff --git a/vendor/golang.org/x/net/http/httpproxy/proxy.go b/vendor/golang.org/x/net/http/httpproxy/proxy.go
index c3bd9a1eeb5..6404aaf157d 100644
--- a/vendor/golang.org/x/net/http/httpproxy/proxy.go
+++ b/vendor/golang.org/x/net/http/httpproxy/proxy.go
@@ -149,10 +149,7 @@ func parseProxy(proxy string) (*url.URL, error) {
}
proxyURL, err := url.Parse(proxy)
- if err != nil ||
- (proxyURL.Scheme != "http" &&
- proxyURL.Scheme != "https" &&
- proxyURL.Scheme != "socks5") {
+ if err != nil || proxyURL.Scheme == "" || proxyURL.Host == "" {
// proxy was bogus. Try prepending "http://" to it and
// see if that parses correctly. If not, we fall
// through and complain about the original one.
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index e2b298d8593..43557ab7e97 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -1564,6 +1564,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
if size > remainSize {
hdec.SetEmitEnabled(false)
mh.Truncated = true
+ remainSize = 0
return
}
remainSize -= size
@@ -1576,6 +1577,36 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
var hc headersOrContinuation = hf
for {
frag := hc.HeaderBlockFragment()
+
+ // Avoid parsing large amounts of headers that we will then discard.
+ // If the sender exceeds the max header list size by too much,
+ // skip parsing the fragment and close the connection.
+ //
+ // "Too much" is either any CONTINUATION frame after we've already
+ // exceeded the max header list size (in which case remainSize is 0),
+ // or a frame whose encoded size is more than twice the remaining
+ // header list bytes we're willing to accept.
+ if int64(len(frag)) > int64(2*remainSize) {
+ if VerboseLogs {
+ log.Printf("http2: header list too large")
+ }
+ // It would be nice to send a RST_STREAM before sending the GOAWAY,
+ // but the structure of the server's frame writer makes this difficult.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+
+ // Also close the connection after any CONTINUATION frame following an
+ // invalid header, since we stop tracking the size of the headers after
+ // an invalid one.
+ if invalid != nil {
+ if VerboseLogs {
+ log.Printf("http2: invalid header: %v", invalid)
+ }
+ // It would be nice to send a RST_STREAM before sending the GOAWAY,
+ // but the structure of the server's frame writer makes this difficult.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+
if _, err := hdec.Write(frag); err != nil {
return nil, ConnectionError(ErrCodeCompression)
}
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
index 684d984fd96..3b9f06b9624 100644
--- a/vendor/golang.org/x/net/http2/pipe.go
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) {
}
}
-var errClosedPipeWrite = errors.New("write on closed buffer")
+var (
+ errClosedPipeWrite = errors.New("write on closed buffer")
+ errUninitializedPipeWrite = errors.New("write on uninitialized buffer")
+)
// Write copies bytes from p into the buffer and wakes a reader.
// It is an error to write more data than the buffer can hold.
@@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) {
if p.err != nil || p.breakErr != nil {
return 0, errClosedPipeWrite
}
+ // pipe.setBuffer is never invoked, leaving the buffer uninitialized.
+ // We shouldn't try to write to an uninitialized pipe,
+ // but returning an error is better than panicking.
+ if p.b == nil {
+ return 0, errUninitializedPipeWrite
+ }
return p.b.Write(d)
}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index ae94c6408d5..ce2e8b40eee 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -124,6 +124,7 @@ type Server struct {
// IdleTimeout specifies how long until idle clients should be
// closed with a GOAWAY frame. PING frames are not considered
// activity for the purposes of IdleTimeout.
+ // If zero or negative, there is no timeout.
IdleTimeout time.Duration
// MaxUploadBufferPerConnection is the size of the initial flow
@@ -434,7 +435,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
// passes the connection off to us with the deadline already set.
// Write deadlines are set per stream in serverConn.newStream.
// Disarm the net.Conn write deadline here.
- if sc.hs.WriteTimeout != 0 {
+ if sc.hs.WriteTimeout > 0 {
sc.conn.SetWriteDeadline(time.Time{})
}
@@ -924,7 +925,7 @@ func (sc *serverConn) serve() {
sc.setConnState(http.StateActive)
sc.setConnState(http.StateIdle)
- if sc.srv.IdleTimeout != 0 {
+ if sc.srv.IdleTimeout > 0 {
sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
}
@@ -1637,7 +1638,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
delete(sc.streams, st.id)
if len(sc.streams) == 0 {
sc.setConnState(http.StateIdle)
- if sc.srv.IdleTimeout != 0 {
+ if sc.srv.IdleTimeout > 0 {
sc.idleTimer.Reset(sc.srv.IdleTimeout)
}
if h1ServerKeepAlivesDisabled(sc.hs) {
@@ -2017,7 +2018,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// similar to how the http1 server works. Here it's
// technically more like the http1 Server's ReadHeaderTimeout
// (in Go 1.8), though. That's a more sane option anyway.
- if sc.hs.ReadTimeout != 0 {
+ if sc.hs.ReadTimeout > 0 {
sc.conn.SetReadDeadline(time.Time{})
st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
@@ -2038,7 +2039,7 @@ func (sc *serverConn) upgradeRequest(req *http.Request) {
// Disable any read deadline set by the net/http package
// prior to the upgrade.
- if sc.hs.ReadTimeout != 0 {
+ if sc.hs.ReadTimeout > 0 {
sc.conn.SetReadDeadline(time.Time{})
}
@@ -2116,7 +2117,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.init(sc.srv.initialStreamRecvWindowSize())
- if sc.hs.WriteTimeout != 0 {
+ if sc.hs.WriteTimeout > 0 {
st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go
new file mode 100644
index 00000000000..61075bd16d3
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/testsync.go
@@ -0,0 +1,331 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package http2
+
+import (
+ "context"
+ "sync"
+ "time"
+)
+
+// testSyncHooks coordinates goroutines in tests.
+//
+// For example, a call to ClientConn.RoundTrip involves several goroutines, including:
+// - the goroutine running RoundTrip;
+// - the clientStream.doRequest goroutine, which writes the request; and
+// - the clientStream.readLoop goroutine, which reads the response.
+//
+// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines
+// are blocked waiting for some condition such as reading the Request.Body or waiting for
+// flow control to become available.
+//
+// The testSyncHooks also manage timers and synthetic time in tests.
+// This permits us to, for example, start a request and cause it to time out waiting for
+// response headers without resorting to time.Sleep calls.
+type testSyncHooks struct {
+ // active/inactive act as a mutex and condition variable.
+ //
+ // - neither chan contains a value: testSyncHooks is locked.
+ // - active contains a value: unlocked, and at least one goroutine is not blocked.
+ // - inactive contains a value: unlocked, and all goroutines are blocked.
+ active chan struct{}
+ inactive chan struct{}
+
+ // goroutine counts
+ total int // total goroutines
+ condwait map[*sync.Cond]int // blocked in sync.Cond.Wait
+ blocked []*testBlockedGoroutine // otherwise blocked
+
+ // fake time
+ now time.Time
+ timers []*fakeTimer
+
+ // Transport testing: Report various events.
+ newclientconn func(*ClientConn)
+ newstream func(*clientStream)
+}
+
+// testBlockedGoroutine is a blocked goroutine.
+type testBlockedGoroutine struct {
+ f func() bool // blocked until f returns true
+ ch chan struct{} // closed when unblocked
+}
+
+func newTestSyncHooks() *testSyncHooks {
+ h := &testSyncHooks{
+ active: make(chan struct{}, 1),
+ inactive: make(chan struct{}, 1),
+ condwait: map[*sync.Cond]int{},
+ }
+ h.inactive <- struct{}{}
+ h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
+ return h
+}
+
+// lock acquires the testSyncHooks mutex.
+func (h *testSyncHooks) lock() {
+ select {
+ case <-h.active:
+ case <-h.inactive:
+ }
+}
+
+// waitInactive waits for all goroutines to become inactive.
+func (h *testSyncHooks) waitInactive() {
+ for {
+ <-h.inactive
+ if !h.unlock() {
+ break
+ }
+ }
+}
+
+// unlock releases the testSyncHooks mutex.
+// It reports whether any goroutines are active.
+func (h *testSyncHooks) unlock() (active bool) {
+ // Look for a blocked goroutine which can be unblocked.
+ blocked := h.blocked[:0]
+ unblocked := false
+ for _, b := range h.blocked {
+ if !unblocked && b.f() {
+ unblocked = true
+ close(b.ch)
+ } else {
+ blocked = append(blocked, b)
+ }
+ }
+ h.blocked = blocked
+
+ // Count goroutines blocked on condition variables.
+ condwait := 0
+ for _, count := range h.condwait {
+ condwait += count
+ }
+
+ if h.total > condwait+len(blocked) {
+ h.active <- struct{}{}
+ return true
+ } else {
+ h.inactive <- struct{}{}
+ return false
+ }
+}
+
+// goRun starts a new goroutine.
+func (h *testSyncHooks) goRun(f func()) {
+ h.lock()
+ h.total++
+ h.unlock()
+ go func() {
+ defer func() {
+ h.lock()
+ h.total--
+ h.unlock()
+ }()
+ f()
+ }()
+}
+
+// blockUntil indicates that a goroutine is blocked waiting for some condition to become true.
+// It waits until f returns true before proceeding.
+//
+// Example usage:
+//
+// h.blockUntil(func() bool {
+// // Is the context done yet?
+// select {
+// case <-ctx.Done():
+// default:
+// return false
+// }
+// return true
+// })
+// // Wait for the context to become done.
+// <-ctx.Done()
+//
+// The function f passed to blockUntil must be non-blocking and idempotent.
+func (h *testSyncHooks) blockUntil(f func() bool) {
+ if f() {
+ return
+ }
+ ch := make(chan struct{})
+ h.lock()
+ h.blocked = append(h.blocked, &testBlockedGoroutine{
+ f: f,
+ ch: ch,
+ })
+ h.unlock()
+ <-ch
+}
+
+// condBroadcast is sync.Cond.Broadcast.
+func (h *testSyncHooks) condBroadcast(cond *sync.Cond) {
+ h.lock()
+ delete(h.condwait, cond)
+ h.unlock()
+ cond.Broadcast()
+}
+
+// condWait is sync.Cond.Wait.
+func (h *testSyncHooks) condWait(cond *sync.Cond) {
+ h.lock()
+ h.condwait[cond]++
+ h.unlock()
+}
+
+// newTimer creates a new fake timer.
+func (h *testSyncHooks) newTimer(d time.Duration) timer {
+ h.lock()
+ defer h.unlock()
+ t := &fakeTimer{
+ hooks: h,
+ when: h.now.Add(d),
+ c: make(chan time.Time),
+ }
+ h.timers = append(h.timers, t)
+ return t
+}
+
+// afterFunc creates a new fake AfterFunc timer.
+func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer {
+ h.lock()
+ defer h.unlock()
+ t := &fakeTimer{
+ hooks: h,
+ when: h.now.Add(d),
+ f: f,
+ }
+ h.timers = append(h.timers, t)
+ return t
+}
+
+func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
+ ctx, cancel := context.WithCancel(ctx)
+ t := h.afterFunc(d, cancel)
+ return ctx, func() {
+ t.Stop()
+ cancel()
+ }
+}
+
+func (h *testSyncHooks) timeUntilEvent() time.Duration {
+ h.lock()
+ defer h.unlock()
+ var next time.Time
+ for _, t := range h.timers {
+ if next.IsZero() || t.when.Before(next) {
+ next = t.when
+ }
+ }
+ if d := next.Sub(h.now); d > 0 {
+ return d
+ }
+ return 0
+}
+
+// advance advances time and causes synthetic timers to fire.
+func (h *testSyncHooks) advance(d time.Duration) {
+ h.lock()
+ defer h.unlock()
+ h.now = h.now.Add(d)
+ timers := h.timers[:0]
+ for _, t := range h.timers {
+ t := t // remove after go.mod depends on go1.22
+ t.mu.Lock()
+ switch {
+ case t.when.After(h.now):
+ timers = append(timers, t)
+ case t.when.IsZero():
+ // stopped timer
+ default:
+ t.when = time.Time{}
+ if t.c != nil {
+ close(t.c)
+ }
+ if t.f != nil {
+ h.total++
+ go func() {
+ defer func() {
+ h.lock()
+ h.total--
+ h.unlock()
+ }()
+ t.f()
+ }()
+ }
+ }
+ t.mu.Unlock()
+ }
+ h.timers = timers
+}
+
+// A timer wraps a time.Timer, or a synthetic equivalent in tests.
+// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires.
+type timer interface {
+ C() <-chan time.Time
+ Stop() bool
+ Reset(d time.Duration) bool
+}
+
+// timeTimer implements timer using real time.
+type timeTimer struct {
+ t *time.Timer
+ c chan time.Time
+}
+
+// newTimeTimer creates a new timer using real time.
+func newTimeTimer(d time.Duration) timer {
+ ch := make(chan time.Time)
+ t := time.AfterFunc(d, func() {
+ close(ch)
+ })
+ return &timeTimer{t, ch}
+}
+
+// newTimeAfterFunc creates an AfterFunc timer using real time.
+func newTimeAfterFunc(d time.Duration, f func()) timer {
+ return &timeTimer{
+ t: time.AfterFunc(d, f),
+ }
+}
+
+func (t timeTimer) C() <-chan time.Time { return t.c }
+func (t timeTimer) Stop() bool { return t.t.Stop() }
+func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) }
+
+// fakeTimer implements timer using fake time.
+type fakeTimer struct {
+ hooks *testSyncHooks
+
+ mu sync.Mutex
+ when time.Time // when the timer will fire
+ c chan time.Time // closed when the timer fires; mutually exclusive with f
+ f func() // called when the timer fires; mutually exclusive with c
+}
+
+func (t *fakeTimer) C() <-chan time.Time { return t.c }
+
+func (t *fakeTimer) Stop() bool {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ stopped := t.when.IsZero()
+ t.when = time.Time{}
+ return stopped
+}
+
+func (t *fakeTimer) Reset(d time.Duration) bool {
+ if t.c != nil || t.f == nil {
+ panic("fakeTimer only supports Reset on AfterFunc timers")
+ }
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ t.hooks.lock()
+ defer t.hooks.unlock()
+ active := !t.when.IsZero()
+ t.when = t.hooks.now.Add(d)
+ if !active {
+ t.hooks.timers = append(t.hooks.timers, t)
+ }
+ return active
+}
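
A hedged usage sketch for the single-use timer abstraction above (ctx is an assumed caller-provided context.Context; this snippet is not part of the diff):

    t := newTimeTimer(250 * time.Millisecond)
    defer t.Stop()
    select {
    case <-t.C():
        // Timed out: the channel is closed on expiry, so late readers see it too.
    case <-ctx.Done():
        // Cancelled before the timer fired.
    }
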
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index c2a5b44b3d6..ce375c8c753 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -147,6 +147,12 @@ type Transport struct {
// waiting for their turn.
StrictMaxConcurrentStreams bool
+ // IdleConnTimeout is the maximum amount of time an idle
+ // (keep-alive) connection will remain idle before closing
+ // itself.
+ // Zero means no limit.
+ IdleConnTimeout time.Duration
+
// ReadIdleTimeout is the timeout after which a health check using ping
// frame will be carried out if no frame is received on the connection.
// Note that a ping response is considered a received frame, so if
@@ -178,6 +184,8 @@ type Transport struct {
connPoolOnce sync.Once
connPoolOrDef ClientConnPool // non-nil version of ConnPool
+
+ syncHooks *testSyncHooks
}
func (t *Transport) maxHeaderListSize() uint32 {
@@ -302,7 +310,7 @@ type ClientConn struct {
readerErr error // set before readerDone is closed
idleTimeout time.Duration // or 0 for never
- idleTimer *time.Timer
+ idleTimer timer
mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes
@@ -344,6 +352,60 @@ type ClientConn struct {
werr error // first write error that has occurred
hbuf bytes.Buffer // HPACK encoder writes into this
henc *hpack.Encoder
+
+ syncHooks *testSyncHooks // can be nil
+}
+
+// Hook points used for testing.
+// Outside of tests, cc.syncHooks is nil and these all have minimal implementations.
+// Inside tests, see the testSyncHooks function docs.
+
+// goRun starts a new goroutine.
+func (cc *ClientConn) goRun(f func()) {
+ if cc.syncHooks != nil {
+ cc.syncHooks.goRun(f)
+ return
+ }
+ go f()
+}
+
+// condBroadcast is cc.cond.Broadcast.
+func (cc *ClientConn) condBroadcast() {
+ if cc.syncHooks != nil {
+ cc.syncHooks.condBroadcast(cc.cond)
+ }
+ cc.cond.Broadcast()
+}
+
+// condWait is cc.cond.Wait.
+func (cc *ClientConn) condWait() {
+ if cc.syncHooks != nil {
+ cc.syncHooks.condWait(cc.cond)
+ }
+ cc.cond.Wait()
+}
+
+// newTimer creates a new time.Timer, or a synthetic timer in tests.
+func (cc *ClientConn) newTimer(d time.Duration) timer {
+ if cc.syncHooks != nil {
+ return cc.syncHooks.newTimer(d)
+ }
+ return newTimeTimer(d)
+}
+
+// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
+func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer {
+ if cc.syncHooks != nil {
+ return cc.syncHooks.afterFunc(d, f)
+ }
+ return newTimeAfterFunc(d, f)
+}
+
+func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
+ if cc.syncHooks != nil {
+ return cc.syncHooks.contextWithTimeout(ctx, d)
+ }
+ return context.WithTimeout(ctx, d)
}
// clientStream is the state for a single HTTP/2 stream. One of these
@@ -425,7 +487,7 @@ func (cs *clientStream) abortStreamLocked(err error) {
// TODO(dneil): Clean up tests where cs.cc.cond is nil.
if cs.cc.cond != nil {
// Wake up writeRequestBody if it is waiting on flow control.
- cs.cc.cond.Broadcast()
+ cs.cc.condBroadcast()
}
}
@@ -435,7 +497,7 @@ func (cs *clientStream) abortRequestBodyWrite() {
defer cc.mu.Unlock()
if cs.reqBody != nil && cs.reqBodyClosed == nil {
cs.closeReqBodyLocked()
- cc.cond.Broadcast()
+ cc.condBroadcast()
}
}
@@ -445,10 +507,10 @@ func (cs *clientStream) closeReqBodyLocked() {
}
cs.reqBodyClosed = make(chan struct{})
reqBodyClosed := cs.reqBodyClosed
- go func() {
+ cs.cc.goRun(func() {
cs.reqBody.Close()
close(reqBodyClosed)
- }()
+ })
}
type stickyErrWriter struct {
@@ -537,15 +599,6 @@ func authorityAddr(scheme string, authority string) (addr string) {
return net.JoinHostPort(host, port)
}
-var retryBackoffHook func(time.Duration) *time.Timer
-
-func backoffNewTimer(d time.Duration) *time.Timer {
- if retryBackoffHook != nil {
- return retryBackoffHook(d)
- }
- return time.NewTimer(d)
-}
-
// RoundTripOpt is like RoundTrip, but takes options.
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
@@ -573,13 +626,27 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
backoff := float64(uint(1) << (uint(retry) - 1))
backoff += backoff * (0.1 * mathrand.Float64())
d := time.Second * time.Duration(backoff)
- timer := backoffNewTimer(d)
+ var tm timer
+ if t.syncHooks != nil {
+ tm = t.syncHooks.newTimer(d)
+ t.syncHooks.blockUntil(func() bool {
+ select {
+ case <-tm.C():
+ case <-req.Context().Done():
+ default:
+ return false
+ }
+ return true
+ })
+ } else {
+ tm = newTimeTimer(d)
+ }
select {
- case <-timer.C:
+ case <-tm.C():
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
case <-req.Context().Done():
- timer.Stop()
+ tm.Stop()
err = req.Context().Err()
}
}
@@ -658,6 +725,9 @@ func canRetryError(err error) bool {
}
func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
+ if t.syncHooks != nil {
+ return t.newClientConn(nil, singleUse, t.syncHooks)
+ }
host, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
@@ -666,7 +736,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b
if err != nil {
return nil, err
}
- return t.newClientConn(tconn, singleUse)
+ return t.newClientConn(tconn, singleUse, nil)
}
func (t *Transport) newTLSConfig(host string) *tls.Config {
@@ -732,10 +802,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 {
}
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
- return t.newClientConn(c, t.disableKeepAlives())
+ return t.newClientConn(c, t.disableKeepAlives(), nil)
}
-func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) {
cc := &ClientConn{
t: t,
tconn: c,
@@ -750,10 +820,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
wantSettingsAck: true,
pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1),
+ syncHooks: hooks,
+ }
+ if hooks != nil {
+ hooks.newclientconn(cc)
+ c = cc.tconn
}
if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d
- cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
+ cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout)
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -818,7 +893,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
return nil, cc.werr
}
- go cc.readLoop()
+ cc.goRun(cc.readLoop)
return cc, nil
}
@@ -826,7 +901,7 @@ func (cc *ClientConn) healthCheck() {
pingTimeout := cc.t.pingTimeout()
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received.
- ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
+ ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout)
defer cancel()
cc.vlogf("http2: Transport sending health check")
err := cc.Ping(ctx)
@@ -1056,7 +1131,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
// Wait for all in-flight streams to complete or connection to close
done := make(chan struct{})
cancelled := false // guarded by cc.mu
- go func() {
+ cc.goRun(func() {
cc.mu.Lock()
defer cc.mu.Unlock()
for {
@@ -1068,9 +1143,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
if cancelled {
break
}
- cc.cond.Wait()
+ cc.condWait()
}
- }()
+ })
shutdownEnterWaitStateHook()
select {
case <-done:
@@ -1080,7 +1155,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
cc.mu.Lock()
// Free the goroutine above
cancelled = true
- cc.cond.Broadcast()
+ cc.condBroadcast()
cc.mu.Unlock()
return ctx.Err()
}
@@ -1118,7 +1193,7 @@ func (cc *ClientConn) closeForError(err error) {
for _, cs := range cc.streams {
cs.abortStreamLocked(err)
}
- cc.cond.Broadcast()
+ cc.condBroadcast()
cc.mu.Unlock()
cc.closeConn()
}
@@ -1215,6 +1290,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() {
}
func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+ return cc.roundTrip(req, nil)
+}
+
+func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) {
ctx := req.Context()
cs := &clientStream{
cc: cc,
@@ -1229,9 +1308,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
respHeaderRecv: make(chan struct{}),
donec: make(chan struct{}),
}
- go cs.doRequest(req)
+ cc.goRun(func() {
+ cs.doRequest(req)
+ })
waitDone := func() error {
+ if cc.syncHooks != nil {
+ cc.syncHooks.blockUntil(func() bool {
+ select {
+ case <-cs.donec:
+ case <-ctx.Done():
+ case <-cs.reqCancel:
+ default:
+ return false
+ }
+ return true
+ })
+ }
select {
case <-cs.donec:
return nil
@@ -1292,7 +1385,24 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return err
}
+ if streamf != nil {
+ streamf(cs)
+ }
+
for {
+ if cc.syncHooks != nil {
+ cc.syncHooks.blockUntil(func() bool {
+ select {
+ case <-cs.respHeaderRecv:
+ case <-cs.abort:
+ case <-ctx.Done():
+ case <-cs.reqCancel:
+ default:
+ return false
+ }
+ return true
+ })
+ }
select {
case <-cs.respHeaderRecv:
return handleResponseHeaders()
@@ -1348,6 +1458,21 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
if cc.reqHeaderMu == nil {
panic("RoundTrip on uninitialized ClientConn") // for tests
}
+ var newStreamHook func(*clientStream)
+ if cc.syncHooks != nil {
+ newStreamHook = cc.syncHooks.newstream
+ cc.syncHooks.blockUntil(func() bool {
+ select {
+ case cc.reqHeaderMu <- struct{}{}:
+ <-cc.reqHeaderMu
+ case <-cs.reqCancel:
+ case <-ctx.Done():
+ default:
+ return false
+ }
+ return true
+ })
+ }
select {
case cc.reqHeaderMu <- struct{}{}:
case <-cs.reqCancel:
@@ -1372,6 +1497,10 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
}
cc.mu.Unlock()
+ if newStreamHook != nil {
+ newStreamHook(cs)
+ }
+
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
if !cc.t.disableCompression() &&
req.Header.Get("Accept-Encoding") == "" &&
@@ -1452,15 +1581,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
var respHeaderTimer <-chan time.Time
var respHeaderRecv chan struct{}
if d := cc.responseHeaderTimeout(); d != 0 {
- timer := time.NewTimer(d)
+ timer := cc.newTimer(d)
defer timer.Stop()
- respHeaderTimer = timer.C
+ respHeaderTimer = timer.C()
respHeaderRecv = cs.respHeaderRecv
}
// Wait until the peer half-closes its end of the stream,
// or until the request is aborted (via context, error, or otherwise),
// whichever comes first.
for {
+ if cc.syncHooks != nil {
+ cc.syncHooks.blockUntil(func() bool {
+ select {
+ case <-cs.peerClosed:
+ case <-respHeaderTimer:
+ case <-respHeaderRecv:
+ case <-cs.abort:
+ case <-ctx.Done():
+ case <-cs.reqCancel:
+ default:
+ return false
+ }
+ return true
+ })
+ }
select {
case <-cs.peerClosed:
return nil
@@ -1609,7 +1753,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
return nil
}
cc.pendingRequests++
- cc.cond.Wait()
+ cc.condWait()
cc.pendingRequests--
select {
case <-cs.abort:
@@ -1871,8 +2015,24 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
cs.flow.take(take)
return take, nil
}
- cc.cond.Wait()
+ cc.condWait()
+ }
+}
+
+func validateHeaders(hdrs http.Header) string {
+ for k, vv := range hdrs {
+ if !httpguts.ValidHeaderFieldName(k) {
+ return fmt.Sprintf("name %q", k)
+ }
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ // Don't include the value in the error,
+ // because it may be sensitive.
+ return fmt.Sprintf("value for header %q", k)
+ }
+ }
}
+ return ""
}
var errNilRequestURL = errors.New("http2: Request.URI is nil")
@@ -1912,19 +2072,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
}
}
- // Check for any invalid headers and return an error before we
+ // Check for any invalid headers+trailers and return an error before we
// potentially pollute our hpack state. (We want to be able to
// continue to reuse the hpack encoder for future requests)
- for k, vv := range req.Header {
- if !httpguts.ValidHeaderFieldName(k) {
- return nil, fmt.Errorf("invalid HTTP header name %q", k)
- }
- for _, v := range vv {
- if !httpguts.ValidHeaderFieldValue(v) {
- // Don't include the value in the error, because it may be sensitive.
- return nil, fmt.Errorf("invalid HTTP header value for header %q", k)
- }
- }
+ if err := validateHeaders(req.Header); err != "" {
+ return nil, fmt.Errorf("invalid HTTP header %s", err)
+ }
+ if err := validateHeaders(req.Trailer); err != "" {
+ return nil, fmt.Errorf("invalid HTTP trailer %s", err)
}
enumerateHeaders := func(f func(name, value string)) {
@@ -2143,7 +2298,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
}
// Wake up writeRequestBody via clientStream.awaitFlowControl and
// wake up RoundTrip if there is a pending request.
- cc.cond.Broadcast()
+ cc.condBroadcast()
closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
@@ -2231,7 +2386,7 @@ func (rl *clientConnReadLoop) cleanup() {
cs.abortStreamLocked(err)
}
}
- cc.cond.Broadcast()
+ cc.condBroadcast()
cc.mu.Unlock()
}
@@ -2266,10 +2421,9 @@ func (rl *clientConnReadLoop) run() error {
cc := rl.cc
gotSettings := false
readIdleTimeout := cc.t.ReadIdleTimeout
- var t *time.Timer
+ var t timer
if readIdleTimeout != 0 {
- t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
- defer t.Stop()
+ t = cc.afterFunc(readIdleTimeout, cc.healthCheck)
}
for {
f, err := cc.fr.ReadFrame()
@@ -2684,7 +2838,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
})
return nil
}
- if !cs.firstByte {
+ if !cs.pastHeaders {
cc.logf("protocol error: received DATA before a HEADERS frame")
rl.endStreamError(cs, StreamError{
StreamID: f.StreamID,
@@ -2867,7 +3021,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
for _, cs := range cc.streams {
cs.flow.add(delta)
}
- cc.cond.Broadcast()
+ cc.condBroadcast()
cc.initialWindowSize = s.Val
case SettingHeaderTableSize:
@@ -2922,7 +3076,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
return ConnectionError(ErrCodeFlowControl)
}
- cc.cond.Broadcast()
+ cc.condBroadcast()
return nil
}
@@ -2964,24 +3118,38 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
}
cc.mu.Unlock()
}
- errc := make(chan error, 1)
- go func() {
+ var pingError error
+ errc := make(chan struct{})
+ cc.goRun(func() {
cc.wmu.Lock()
defer cc.wmu.Unlock()
- if err := cc.fr.WritePing(false, p); err != nil {
- errc <- err
+ if pingError = cc.fr.WritePing(false, p); pingError != nil {
+ close(errc)
return
}
- if err := cc.bw.Flush(); err != nil {
- errc <- err
+ if pingError = cc.bw.Flush(); pingError != nil {
+ close(errc)
return
}
- }()
+ })
+ if cc.syncHooks != nil {
+ cc.syncHooks.blockUntil(func() bool {
+ select {
+ case <-c:
+ case <-errc:
+ case <-ctx.Done():
+ case <-cc.readerDone:
+ default:
+ return false
+ }
+ return true
+ })
+ }
select {
case <-c:
return nil
- case err := <-errc:
- return err
+ case <-errc:
+ return pingError
case <-ctx.Done():
return ctx.Err()
case <-cc.readerDone:
@@ -3150,9 +3318,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err
}
func (t *Transport) idleConnTimeout() time.Duration {
+ // To keep things backwards compatible, use IdleConnTimeout when it is non-zero,
+ // then fall back to the IdleConnTimeout on the underlying http1 transport,
+ // and finally to 0 (no limit).
+ if t.IdleConnTimeout != 0 {
+ return t.IdleConnTimeout
+ }
+
if t.t1 != nil {
return t.t1.IdleConnTimeout
}
+
return 0
}
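
Since http2.Transport now carries its own IdleConnTimeout that takes precedence over the wrapped http1 transport's setting, a minimal configuration sketch (the 30s value is an illustrative assumption):

    t2 := &http2.Transport{
        // Non-zero here wins over t1.IdleConnTimeout; zero falls back to it.
        IdleConnTimeout: 30 * time.Second,
    }
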
diff --git a/vendor/google.golang.org/grpc/experimental/experimental.go b/vendor/google.golang.org/grpc/experimental/experimental.go
new file mode 100644
index 00000000000..de7f13a2210
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/experimental.go
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package experimental is a collection of experimental features that might
+// have some rough edges to them. Housing experimental features in this package
+// results in a user accessing these APIs as `experimental.Foo`, thereby making
+// it explicit that the feature is experimental and using them in production
+// code is at their own risk.
+//
+// All APIs in this package are experimental.
+package experimental
+
+import (
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/internal"
+)
+
+// WithRecvBufferPool returns a grpc.DialOption that configures the use of
+// bufferPool for parsing incoming messages on a grpc.ClientConn. Depending on
+// the application's workload, this could result in reduced memory allocation.
+//
+// If you are unsure about how to implement a memory pool but want to utilize
+// one, begin with grpc.NewSharedBufferPool.
+//
+// Note: The shared buffer pool feature will not be active if any of the
+// following options are used: WithStatsHandler, EnableTracing, or binary
+// logging. In such cases, the shared buffer pool will be ignored.
+//
+// Note: It is not recommended to use the shared buffer pool when compression is
+// enabled.
+func WithRecvBufferPool(bufferPool grpc.SharedBufferPool) grpc.DialOption {
+ return internal.WithRecvBufferPool.(func(grpc.SharedBufferPool) grpc.DialOption)(bufferPool)
+}
+
+// RecvBufferPool returns a grpc.ServerOption that configures the server to use
+// the provided shared buffer pool for parsing incoming messages. Depending on
+// the application's workload, this could result in reduced memory allocation.
+//
+// If you are unsure about how to implement a memory pool but want to utilize
+// one, begin with grpc.NewSharedBufferPool.
+//
+// Note: The shared buffer pool feature will not be active if any of the
+// following options are used: StatsHandler, EnableTracing, or binary logging.
+// In such cases, the shared buffer pool will be ignored.
+//
+// Note: It is not recommended to use the shared buffer pool when compression is
+// enabled.
+func RecvBufferPool(bufferPool grpc.SharedBufferPool) grpc.ServerOption {
+ return internal.RecvBufferPool.(func(grpc.SharedBufferPool) grpc.ServerOption)(bufferPool)
+}
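
An illustrative sketch (addr and the insecure credentials are assumptions, not part of this diff) of wiring up the experimental buffer-pool options from this package:

    // Client side: reuse receive buffers across incoming messages.
    conn, err := grpc.Dial(addr,
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        experimental.WithRecvBufferPool(grpc.NewSharedBufferPool()),
    )
    if err != nil {
        return err
    }
    defer conn.Close()

    // Server side: same pool type, applied as a ServerOption.
    srv := grpc.NewServer(experimental.RecvBufferPool(grpc.NewSharedBufferPool()))
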
diff --git a/vendor/modules.txt b/vendor/modules.txt
index df2d5a270d6..29b6b42c3f8 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -448,8 +448,8 @@ github.com/golang/glog/internal/stackdump
## explicit
github.com/golang/groupcache/lru
github.com/golang/groupcache/singleflight
-# github.com/golang/protobuf v1.5.3
-## explicit; go 1.9
+# github.com/golang/protobuf v1.5.4
+## explicit; go 1.17
github.com/golang/protobuf/proto
github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
@@ -537,7 +537,7 @@ github.com/gosimple/slug
# github.com/grafana-tools/sdk v0.0.0-20220919052116-6562121319fc
## explicit; go 1.13
github.com/grafana-tools/sdk
-# github.com/grafana/dskit v0.0.0-20240311184239-73feada6c0d7
+# github.com/grafana/dskit v0.0.0-20240403100540-1435abf0da58
## explicit; go 1.20
github.com/grafana/dskit/backoff
github.com/grafana/dskit/ballast
@@ -814,6 +814,9 @@ github.com/pierrec/lz4/v4/internal/lz4block
github.com/pierrec/lz4/v4/internal/lz4errors
github.com/pierrec/lz4/v4/internal/lz4stream
github.com/pierrec/lz4/v4/internal/xxh32
+# github.com/pires/go-proxyproto v0.7.0
+## explicit; go 1.18
+github.com/pires/go-proxyproto
# github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c
## explicit; go 1.14
github.com/pkg/browser
@@ -906,7 +909,7 @@ github.com/prometheus/exporter-toolkit/web
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
-# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20240319094147-5a0ec4187ab5
+# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20240327215316-a97e07f28d7b
## explicit; go 1.21
github.com/prometheus/prometheus/config
github.com/prometheus/prometheus/discovery
@@ -1213,14 +1216,14 @@ golang.org/x/crypto/internal/poly1305
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/pkcs12
golang.org/x/crypto/pkcs12/internal/rc2
-# golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
+# golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8
## explicit; go 1.20
golang.org/x/exp/constraints
golang.org/x/exp/slices
# golang.org/x/mod v0.16.0
## explicit; go 1.18
golang.org/x/mod/semver
-# golang.org/x/net v0.22.0
+# golang.org/x/net v0.23.0
## explicit; go 1.18
golang.org/x/net/bpf
golang.org/x/net/context/ctxhttp
@@ -1367,6 +1370,7 @@ google.golang.org/grpc/credentials/oauth
google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/gzip
google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/experimental
google.golang.org/grpc/grpclog
google.golang.org/grpc/health/grpc_health_v1
google.golang.org/grpc/internal
@@ -1459,10 +1463,10 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094
## explicit
gopkg.in/yaml.v3
-# k8s.io/apimachinery v0.29.2
+# k8s.io/apimachinery v0.29.3
## explicit; go 1.21
k8s.io/apimachinery/pkg/util/runtime
-# k8s.io/client-go v0.29.2
+# k8s.io/client-go v0.29.3
## explicit; go 1.21
k8s.io/client-go/tools/metrics
k8s.io/client-go/util/workqueue
@@ -1527,7 +1531,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk
sigs.k8s.io/yaml
sigs.k8s.io/yaml/goyaml.v2
sigs.k8s.io/yaml/goyaml.v3
-# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20240319094147-5a0ec4187ab5
+# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20240327215316-a97e07f28d7b
# github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe
# gopkg.in/yaml.v3 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094
# github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6