From 52e18590aeef34c2657f0aa602661cfe4b02915b Mon Sep 17 00:00:00 2001 From: Oleg Afanasyev Date: Tue, 25 Oct 2022 10:55:31 +0100 Subject: [PATCH] roachtest: mvcc_gc increase GC waiting timeouts The test waits for the MVCC GC queue to collect old data. Replicas are enqueued for GC asynchronously, and it could take a long time for garbage to get collected, which causes the test to fail. This commit bumps the retry timeout to remove false negatives. Release note: None Fixes: #90020 --- pkg/cmd/roachtest/tests/mvcc_gc.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/cmd/roachtest/tests/mvcc_gc.go b/pkg/cmd/roachtest/tests/mvcc_gc.go index 67c09463c9ff..85fd38b00726 100644 --- a/pkg/cmd/roachtest/tests/mvcc_gc.go +++ b/pkg/cmd/roachtest/tests/mvcc_gc.go @@ -70,6 +70,8 @@ func runMVCCGC(ctx context.Context, t test.Test, c cluster.Cluster) { // How many times test repeats generate/cleanup cycles after initial one. const cleanupRuns = 3 + // How long to wait for data to be GCd during assert loop. + const gcRetryTimeout = 7 * time.Minute c.Put(ctx, t.Cockroach(), "./cockroach") s := install.MakeClusterSettings() @@ -129,7 +131,7 @@ func runMVCCGC(ctx context.Context, t test.Test, c cluster.Cluster) { t.L().Printf("partially deleted some data using tombstones") - assertRangesWithGCRetry(ctx, t, c, 5*time.Minute, m, func() error { + assertRangesWithGCRetry(ctx, t, c, gcRetryTimeout, m, func() error { totals, rangeCount := collectTableMVCCStatsOrFatal(t, conn, m) return checkRangesHaveNoRangeTombstones(totals, rangeCount) }) @@ -152,7 +154,7 @@ func runMVCCGC(ctx context.Context, t test.Test, c cluster.Cluster) { t.Fatal(err) } - assertRangesWithGCRetry(ctx, t, c, gcRetryTimeout, m, func() error { totals, details := collectStatsAndConsistencyOrFail(t, conn, m) return checkRangesConsistentAndHaveNoData(totals, details) })