From 295dba926e0b1b6546122a1189487ed45182040f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?=
Date: Wed, 12 Jul 2023 12:11:50 +0300
Subject: [PATCH] e2e/store: try to fix Series() limit test again (#6522)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

I finally managed to reproduce this failure locally with
https://github.com/efficientgo/e2e/commit/c316eb95ae5b0fa65b0be110f8cfc3c48e8810bb.
The added t.Logf() showed that the problem is that, with the lower bytes
limit, the query might hit the series or the chunks part first. I have
bumped the bytes limit; I calculated the new value by checking how many
bytes are allocated before the last chunk is sent.

I also noticed that one block is created without a delay. Update it so
that it is created like the others.

Include the objstore@main update with
https://github.com/thanos-io/objstore/pull/62/files so that Iter()
always returns an error on a timeout.

Signed-off-by: Giedrius Statkevičius
---
 .circleci/config.yml           |  2 +-
 docs/storage.md                |  4 ++--
 pkg/replicate/replicator.go    |  4 ++--
 test/e2e/store_gateway_test.go | 10 +++++++---
 4 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 50c82994f0..7098932b84 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -18,7 +18,7 @@ jobs:
   test:
     executor: golang-test
     environment:
-      GO111MODULE: 'on'
+      GO111MODULE: "on"
     steps:
       - git-shallow-clone/checkout
       - go/mod-download-cached
diff --git a/docs/storage.md b/docs/storage.md
index f283e5b5e0..24634ad3c7 100644
--- a/docs/storage.md
+++ b/docs/storage.md
@@ -209,7 +209,7 @@ Example working AWS IAM policy for user:
 To test the policy, set env vars for S3 access for *empty, not used* bucket as well as:
 
 ```
-THANOS_TEST_OBJSTORE_SKIP=GCS,AZURE,SWIFT,COS,ALIYUNOSS,BOS,OCI
+THANOS_TEST_OBJSTORE_SKIP=GCS,AZURE,SWIFT,COS,ALIYUNOSS,BOS,OCI,OBS
 THANOS_ALLOW_EXISTING_BUCKET_USE=true
 ```
 
@@ -243,7 +243,7 @@ We need access to CreateBucket and DeleteBucket and access to all buckets:
 }
 ```
 
-With this policy you should be able to run set `THANOS_TEST_OBJSTORE_SKIP=GCS,AZURE,SWIFT,COS,ALIYUNOSS,BOS,OCI` and unset `S3_BUCKET` and run all tests using `make test`.
+With this policy you should be able to run set `THANOS_TEST_OBJSTORE_SKIP=GCS,AZURE,SWIFT,COS,ALIYUNOSS,BOS,OCI,OBS` and unset `S3_BUCKET` and run all tests using `make test`.
 
 Details about AWS policies: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
diff --git a/pkg/replicate/replicator.go b/pkg/replicate/replicator.go
index 1c42c14c8a..151c667197 100644
--- a/pkg/replicate/replicator.go
+++ b/pkg/replicate/replicator.go
@@ -117,7 +117,7 @@ func RunReplicate(
 	fromBkt, err := client.NewBucket(
 		logger,
 		fromConfContentYaml,
-		prometheus.WrapRegistererWith(prometheus.Labels{"replicate": "from"}, reg),
+		prometheus.WrapRegistererWithPrefix("thanos_", prometheus.WrapRegistererWith(prometheus.Labels{"replicate": "from"}, reg)),
 		component.Replicate.String(),
 	)
 	if err != nil {
@@ -136,7 +136,7 @@ func RunReplicate(
 	toBkt, err := client.NewBucket(
 		logger,
 		toConfContentYaml,
-		prometheus.WrapRegistererWith(prometheus.Labels{"replicate": "to"}, reg),
+		prometheus.WrapRegistererWithPrefix("thanos_", prometheus.WrapRegistererWith(prometheus.Labels{"replicate": "to"}, reg)),
 		component.Replicate.String(),
 	)
 	if err != nil {
diff --git a/test/e2e/store_gateway_test.go b/test/e2e/store_gateway_test.go
index b5039ab408..4a888b1ff1 100644
--- a/test/e2e/store_gateway_test.go
+++ b/test/e2e/store_gateway_test.go
@@ -825,7 +825,7 @@ config:
 		},
 		string(cacheCfg),
 		"",
-		[]string{"--store.grpc.downloaded-bytes-limit=196627B"},
+		[]string{"--store.grpc.downloaded-bytes-limit=310176B"},
 	)
 	testutil.Ok(t, e2e.StartAndWaitReady(store1, store2, store3))
 
@@ -842,6 +842,7 @@ config:
 	extLset := labels.FromStrings("ext1", "value1", "replica", "1")
 	extLset2 := labels.FromStrings("ext1", "value1", "replica", "2")
 	extLset3 := labels.FromStrings("ext1", "value2", "replica", "3")
+	extLset4 := labels.FromStrings("ext1", "value2", "replica", "4")
 
 	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
 	t.Cleanup(cancel)
@@ -853,7 +854,7 @@ config:
 	testutil.Ok(t, err)
 	id3, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset3, 0, metadata.NoneFunc)
 	testutil.Ok(t, err)
-	id4, err := e2eutil.CreateBlock(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), extLset, 0, metadata.NoneFunc)
+	id4, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset4, 0, metadata.NoneFunc)
 	testutil.Ok(t, err)
 	l := log.NewLogfmtLogger(os.Stdout)
 	bkt, err := s3.NewBucketWithConfig(l,
@@ -897,8 +898,11 @@ config:
 
 	testutil.Ok(t, runutil.RetryWithLog(log.NewLogfmtLogger(os.Stdout), 5*time.Second, ctx.Done(), func() error {
 		if _, _, _, err := promclient.NewDefaultClient().QueryInstant(ctx, urlParse(t, "http://"+q3.Endpoint("http")), testQuery, now, opts); err != nil {
+			if err != nil {
+				t.Logf("got error: %s", err)
+			}
 			e := err.Error()
-			if strings.Contains(e, "load chunks") && strings.Contains(e, "exceeded bytes limit while fetching chunks: limit 196627 violated") {
+			if strings.Contains(e, "load chunks") && strings.Contains(e, "exceeded bytes limit while fetching chunks: limit 310176 violated") {
 				return nil
 			}
 			return err
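
For context on the bytes-limit fix: the store gateway charges every fetched byte against `--store.grpc.downloaded-bytes-limit`, so a limit that sits too close to the series total can trip while fetching series instead of chunks, and the test's string match on the chunks message flakes. Below is a minimal sketch of that reserve-and-fail accounting pattern; `byteLimiter` is a hypothetical stand-in, not the actual Thanos limiter.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// byteLimiter is a hypothetical stand-in for the store gateway's
// downloaded-bytes accounting: every fetch reserves its size up front
// and fails once the running total crosses the limit.
type byteLimiter struct {
	limit    uint64
	reserved atomic.Uint64
}

// Reserve adds n to the running total and errors once the limit is crossed.
func (l *byteLimiter) Reserve(n uint64) error {
	if total := l.reserved.Add(n); total > l.limit {
		return fmt.Errorf("exceeded bytes limit while fetching chunks: limit %d violated (got %d)", l.limit, total)
	}
	return nil
}

func main() {
	// With a limit between the series and chunks totals, which fetch trips
	// first depends on sizes and ordering - the source of the flake.
	l := &byteLimiter{limit: 310176}
	fmt.Println(l.Reserve(200000)) // <nil>
	fmt.Println(l.Reserve(150000)) // limit violated
}
```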
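The replicator change nests two client_golang wrappers that do exist with these signatures: `prometheus.WrapRegistererWith` adds a constant `replicate` label, and `prometheus.WrapRegistererWithPrefix` prepends `thanos_` to every metric name registered through it. A standalone illustration of the same nesting (the counter name is only an example):

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()

	// Outer wrapper adds the "thanos_" name prefix; inner wrapper adds a
	// constant replicate="from" label - the same nesting the patch applies
	// in RunReplicate for both bucket clients.
	wrapped := prometheus.WrapRegistererWithPrefix("thanos_",
		prometheus.WrapRegistererWith(prometheus.Labels{"replicate": "from"}, reg))

	// A metric registered as "objstore_bucket_operations_total" is exposed
	// as thanos_objstore_bucket_operations_total{replicate="from"}.
	promauto.With(wrapped).NewCounter(prometheus.CounterOpts{
		Name: "objstore_bucket_operations_total",
		Help: "Example counter registered through the wrapped registerer.",
	}).Inc()
}
```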
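The e2e assertion retries the instant query until the error is exactly the expected limit violation. Here is a simplified stand-in for `runutil.RetryWithLog` (assumed semantics: re-run `f` on an interval until it returns nil or the stop channel closes) showing the treat-expected-error-as-success shape the test uses:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
	"time"
)

// retryUntil is a simplified stand-in for runutil.RetryWithLog: it re-runs f
// on an interval until f returns nil or stopc closes, then returns f's error.
func retryUntil(interval time.Duration, stopc <-chan struct{}, f func() error) error {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		err := f()
		if err == nil {
			return nil
		}
		select {
		case <-stopc:
			return err
		case <-t.C:
		}
	}
}

func main() {
	stop := make(chan struct{})
	err := retryUntil(10*time.Millisecond, stop, func() error {
		// Stand-in for the instant query; the test only succeeds once the
		// error is specifically the chunk-fetch limit violation.
		err := errors.New("load chunks: exceeded bytes limit while fetching chunks: limit 310176 violated")
		if strings.Contains(err.Error(), "load chunks") &&
			strings.Contains(err.Error(), "exceeded bytes limit while fetching chunks: limit 310176 violated") {
			return nil // expected failure mode - treat as success
		}
		return err
	})
	fmt.Println(err) // <nil>
}
```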