Skip to content

Commit

Permalink
pkg/ccl: Unskip TestTenantStatusAPI/tenant_ranges/pagination
Browse files Browse the repository at this point in the history
Fixes: #92979

Previously, in #97386,
we skipped test_tenant_ranges_pagination because it was marked as flaky.

The test makes a request for a single range and expects an offset of `1`
back. It then uses this offset to request a second range, and expects
an offset of `2`. This means that the test requires at least 3 ranges
to exist on the tenant.

The test was flaking on the assertion that the offset returned by the
second request came back as `2`. Instead, it was flaking when the
offset came back as `0`, which signifies that there are no more
ranges to process.

We learned that the tenant create process performs an asynchronous splitting
of ranges, which is what would lead to this sporadic scenario
where not enough ranges existed (yet) for the test to succeed.

This patch updates the test with a `testutils.SucceedsSoon` clause that
checks first that `crdb_internal.ranges` contains at least 3 ranges,
prior to making the second request. This should provide sufficient time
for the range split queue to be processed and eliminate the vast majority
of these test flakes.

Release note: none
  • Loading branch information
abarganier committed Mar 21, 2023
1 parent 5f5cf26 commit 0c69318
Showing 1 changed file with 27 additions and 13 deletions.
40 changes: 27 additions & 13 deletions pkg/ccl/serverccl/statusccl/tenant_status_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1290,8 +1290,6 @@ func testTenantRangesRPC(_ context.Context, t *testing.T, helper serverccl.Tenan
})

t.Run("test tenant ranges pagination", func(t *testing.T) {
skip.WithIssue(t, 92979,
"flaky test, difficult to reproduce locally. Skip until resolved.")
ctx := context.Background()
resp1, err := tenantA.TenantRanges(ctx, &serverpb.TenantRangesRequest{
Limit: 1,
Expand All @@ -1302,18 +1300,34 @@ func testTenantRangesRPC(_ context.Context, t *testing.T, helper serverccl.Tenan
require.Len(t, ranges.Ranges, 1)
}

resp2, err := tenantA.TenantRanges(ctx, &serverpb.TenantRangesRequest{
Limit: 1,
Offset: resp1.Next,
sql := helper.TestCluster().TenantConn(0)
// Wait for the split queue to process some before requesting the 2nd range.
// We expect an offset for the 3rd range, so wait until at least 3 ranges exist.
testutils.SucceedsSoon(t, func() error {
res := sql.QueryStr(t, "SELECT count(*) FROM crdb_internal.ranges")
require.Equal(t, len(res), 1)
require.Equal(t, len(res[0]), 1)
rangeCount, err := strconv.Atoi(res[0][0])
require.NoError(t, err)
if rangeCount < 3 {
return errors.Newf("expected >= 3 ranges, got %d", rangeCount)
}

resp2, err := tenantA.TenantRanges(ctx, &serverpb.TenantRangesRequest{
Limit: 1,
Offset: resp1.Next,
})
require.NoError(t, err)
require.Equal(t, 2, int(resp2.Next))
for locality, ranges := range resp2.RangesByLocality {
require.Len(t, ranges.Ranges, 1)
// Verify pagination functions based on ascending RangeID order.
require.True(t,
resp1.RangesByLocality[locality].Ranges[0].RangeID < ranges.Ranges[0].RangeID)
}
return nil
})
require.NoError(t, err)
require.Equal(t, 2, int(resp2.Next))
for locality, ranges := range resp2.RangesByLocality {
require.Len(t, ranges.Ranges, 1)
// Verify pagination functions based on ascending RangeID order.
require.True(t,
resp1.RangesByLocality[locality].Ranges[0].RangeID < ranges.Ranges[0].RangeID)
}

})
}

Expand Down

0 comments on commit 0c69318

Please sign in to comment.