e2e/store: try to fix Series() limit test again (thanos-io#6522)
I finally managed to reproduce this failure locally with
efficientgo/e2e@c316eb9.
The added t.Logf() showed that the problem is that, with a lower bytes
limit, the limiter might hit the series or chunks part first. I have
bumped the bytes limit; I calculated the new value by checking how many
bytes are allocated before the last chunk is sent.
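
For intuition, here is a minimal sketch of the kind of byte limiter this
test exercises; the type and method names are illustrative, not the
actual Thanos implementation. Bytes are reserved as series and chunk
data are fetched, and the first Reserve() call that pushes the running
total past the limit fails, which is why a limit that is too low can
trip on the series stage before the chunks stage the test asserts on.

```go
package limiter

import (
	"fmt"
	"sync/atomic"
)

// bytesLimiter is an illustrative stand-in for the store gateway's
// downloaded-bytes limiter, not the real Thanos type.
type bytesLimiter struct {
	limit    uint64
	reserved atomic.Uint64
}

// Reserve accounts for n more downloaded bytes and fails as soon as the
// running total exceeds the configured limit.
func (l *bytesLimiter) Reserve(n uint64) error {
	if total := l.reserved.Add(n); total > l.limit {
		return fmt.Errorf("exceeded bytes limit: limit %d violated (total %d)", l.limit, total)
	}
	return nil
}
```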

I have also noticed that one block is created without a delay. Update
it so that it is created like the others.

Include the objstore@main update with
https://github.com/thanos-io/objstore/pull/62/files so that Iter()
always returns an error on a timeout.
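
A small sketch of what that guarantee means for callers, assuming the
bucket comes from thanos-io/objstore (the helper function below is
hypothetical):

```go
package example

import (
	"context"

	"github.com/thanos-io/objstore"
)

// listDir lists dir and propagates a context timeout. With the linked
// objstore fix, Iter itself returns the context error; the trailing
// ctx.Err() check is only a defensive pattern for older versions.
func listDir(ctx context.Context, bkt objstore.Bucket, dir string) ([]string, error) {
	var names []string
	if err := bkt.Iter(ctx, dir, func(name string) error {
		names = append(names, name)
		return nil
	}); err != nil {
		return nil, err // now includes context.DeadlineExceeded on timeout
	}
	return names, ctx.Err()
}
```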

Signed-off-by: Giedrius Statkevičius <[email protected]>
GiedriusS committed Jul 27, 2023
1 parent c2709a4 commit 295dba9
Showing 4 changed files with 12 additions and 8 deletions.
.circleci/config.yml (2 changes: 1 addition & 1 deletion)

@@ -18,7 +18,7 @@ jobs:
   test:
     executor: golang-test
     environment:
-      GO111MODULE: 'on'
+      GO111MODULE: "on"
     steps:
       - git-shallow-clone/checkout
       - go/mod-download-cached
docs/storage.md (4 changes: 2 additions & 2 deletions)

@@ -209,7 +209,7 @@ Example working AWS IAM policy for user:
 To test the policy, set env vars for S3 access for *empty, not used* bucket as well as:
 
 ```
-THANOS_TEST_OBJSTORE_SKIP=GCS,AZURE,SWIFT,COS,ALIYUNOSS,BOS,OCI
+THANOS_TEST_OBJSTORE_SKIP=GCS,AZURE,SWIFT,COS,ALIYUNOSS,BOS,OCI,OBS
 THANOS_ALLOW_EXISTING_BUCKET_USE=true
 ```
@@ -243,7 +243,7 @@ We need access to CreateBucket and DeleteBucket and access to all buckets:
 }
 ```
 
-With this policy you should be able to run set `THANOS_TEST_OBJSTORE_SKIP=GCS,AZURE,SWIFT,COS,ALIYUNOSS,BOS,OCI` and unset `S3_BUCKET` and run all tests using `make test`.
+With this policy you should be able to run set `THANOS_TEST_OBJSTORE_SKIP=GCS,AZURE,SWIFT,COS,ALIYUNOSS,BOS,OCI,OBS` and unset `S3_BUCKET` and run all tests using `make test`.
 
 Details about AWS policies: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
pkg/replicate/replicator.go (4 changes: 2 additions & 2 deletions)

@@ -117,7 +117,7 @@ func RunReplicate(
 	fromBkt, err := client.NewBucket(
 		logger,
 		fromConfContentYaml,
-		prometheus.WrapRegistererWith(prometheus.Labels{"replicate": "from"}, reg),
+		prometheus.WrapRegistererWithPrefix("thanos_", prometheus.WrapRegistererWith(prometheus.Labels{"replicate": "from"}, reg)),
 		component.Replicate.String(),
 	)
 	if err != nil {
@@ -136,7 +136,7 @@ func RunReplicate(
 	toBkt, err := client.NewBucket(
 		logger,
 		toConfContentYaml,
-		prometheus.WrapRegistererWith(prometheus.Labels{"replicate": "to"}, reg),
+		prometheus.WrapRegistererWithPrefix("thanos_", prometheus.WrapRegistererWith(prometheus.Labels{"replicate": "to"}, reg)),
 		component.Replicate.String(),
 	)
 	if err != nil {
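
For readers unfamiliar with the wrapping above: WrapRegistererWith
attaches the replicate label to everything registered through it, and
WrapRegistererWithPrefix then prepends "thanos_" to every metric name,
so the replication bucket metrics line up with the naming used by other
Thanos components. A minimal sketch of the effect (the counter name is
illustrative, not a metric Thanos actually registers):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	// Same wrapping order as in replicator.go: label first, then prefix.
	wrapped := prometheus.WrapRegistererWithPrefix("thanos_",
		prometheus.WrapRegistererWith(prometheus.Labels{"replicate": "from"}, reg))

	ops := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "bucket_operations_total",
		Help: "Example counter registered through the wrapped registerer.",
	})
	wrapped.MustRegister(ops)
	ops.Inc()

	// Gathering from the underlying registry shows the final series:
	// thanos_bucket_operations_total{replicate="from"} 1
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName())
	}
}
```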
test/e2e/store_gateway_test.go (10 changes: 7 additions & 3 deletions)

@@ -825,7 +825,7 @@ config:
 		},
 		string(cacheCfg),
 		"",
-		[]string{"--store.grpc.downloaded-bytes-limit=196627B"},
+		[]string{"--store.grpc.downloaded-bytes-limit=310176B"},
 	)
 
 	testutil.Ok(t, e2e.StartAndWaitReady(store1, store2, store3))
@@ -842,6 +842,7 @@
 	extLset := labels.FromStrings("ext1", "value1", "replica", "1")
 	extLset2 := labels.FromStrings("ext1", "value1", "replica", "2")
 	extLset3 := labels.FromStrings("ext1", "value2", "replica", "3")
+	extLset4 := labels.FromStrings("ext1", "value2", "replica", "4")
 
 	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
 	t.Cleanup(cancel)
@@ -853,7 +854,7 @@
 	testutil.Ok(t, err)
 	id3, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset3, 0, metadata.NoneFunc)
 	testutil.Ok(t, err)
-	id4, err := e2eutil.CreateBlock(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), extLset, 0, metadata.NoneFunc)
+	id4, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset4, 0, metadata.NoneFunc)
 	testutil.Ok(t, err)
 	l := log.NewLogfmtLogger(os.Stdout)
 	bkt, err := s3.NewBucketWithConfig(l,
@@ -897,8 +898,11 @@ config:
 
 	testutil.Ok(t, runutil.RetryWithLog(log.NewLogfmtLogger(os.Stdout), 5*time.Second, ctx.Done(), func() error {
 		if _, _, _, err := promclient.NewDefaultClient().QueryInstant(ctx, urlParse(t, "http://"+q3.Endpoint("http")), testQuery, now, opts); err != nil {
+			if err != nil {
+				t.Logf("got error: %s", err)
+			}
 			e := err.Error()
-			if strings.Contains(e, "load chunks") && strings.Contains(e, "exceeded bytes limit while fetching chunks: limit 196627 violated") {
+			if strings.Contains(e, "load chunks") && strings.Contains(e, "exceeded bytes limit while fetching chunks: limit 310176 violated") {
 				return nil
 			}
 			return err
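
One detail in this last hunk is that the limit value appears twice: once
in the CLI flag and once in the asserted error text, and both had to be
bumped in lockstep. A hedged sketch of one way to keep them in sync (an
illustrative refactor, not part of this commit):

```go
package e2e_test

import "fmt"

// bytesLimit mirrors the store gateway's downloaded-bytes limit used by
// the test. Deriving both the flag and the expected error text from one
// constant keeps them from drifting apart when the limit is re-tuned.
const bytesLimit = 310176

var (
	limitFlag   = fmt.Sprintf("--store.grpc.downloaded-bytes-limit=%dB", bytesLimit)
	wantErrText = fmt.Sprintf("exceeded bytes limit while fetching chunks: limit %d violated", bytesLimit)
)
```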
