*: phase out ReusableListener bool
tbg committed Jul 12, 2023
1 parent a732de0 commit 27af371
Showing 5 changed files with 10 additions and 23 deletions.
9 changes: 0 additions & 9 deletions pkg/base/test_server_args.go
@@ -205,15 +205,6 @@ type TestClusterArgs struct {
 	// and potentially adjusted according to ReplicationMode.
 	ServerArgsPerNode map[int]TestServerArgs
 
-	// If reusable listeners is true, then restart should keep listeners untouched
-	// so that servers are kept on the same ports. It is up to the test to set
-	// proxy listeners to TestServerArgs.Listener that would survive
-	// net.Listener.Close() and then allow restarted server to use them again.
-	// See testutils.ListenerRegistry.
-	//
-	// TODO(during PR): phase this out.
-	ReusableListeners bool
-
 	// If set, listeners will be created from the below registry and they will be
 	// retained across restarts (i.e. servers are kept on the same ports, but
 	// avoiding races where another process grabs the port while the server is
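For context, a hypothetical test using the retained ReusableListenerReg field could look roughly like the sketch below. This is not code from the commit: the test name, the sticky-engine registry constructor/cleanup names, and the import paths are assumptions based on the surrounding test code in this diff. The sticky in-memory engines mirror what the touched tests configure so that a restarted server finds its data again; the listener registry is what keeps it on the same port.

// Hypothetical example (not part of this commit).
package example_test

import (
	"context"
	"strconv"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/server"
	"github.com/cockroachdb/cockroach/pkg/testutils/listenerutil"
	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
	"github.com/stretchr/testify/require"
)

func TestRestartKeepsPorts(t *testing.T) {
	// The registry owns the listeners; a restarted server comes back on the
	// same port without the test wiring proxy listeners by hand.
	lReg := listenerutil.NewListenerRegistry()
	defer lReg.Close()
	// Sticky in-memory engines so a restarted server keeps its data,
	// mirroring the tests touched by this commit (constructor name assumed).
	storeReg := server.NewStickyInMemEnginesRegistry()
	defer storeReg.CloseAllStickyInMemEngines()

	args := base.TestClusterArgs{
		// ReusableListenerReg replaces the old ReusableListeners bool plus the
		// per-node TestServerArgs.Listener assignments.
		ReusableListenerReg: lReg,
		ServerArgsPerNode:   make(map[int]base.TestServerArgs),
	}
	for i := 0; i < 3; i++ {
		args.ServerArgsPerNode[i] = base.TestServerArgs{
			Knobs: base.TestingKnobs{
				Server: &server.TestingKnobs{StickyEngineRegistry: storeReg},
			},
			StoreSpecs: []base.StoreSpec{{
				InMemory:               true,
				StickyInMemoryEngineID: strconv.Itoa(i),
			}},
		}
	}
	tc := testcluster.StartTestCluster(t, 3, args)
	defer tc.Stopper().Stop(context.Background())

	tc.StopServer(0)
	// No listener Reopen() call is needed before restarting anymore.
	require.NoError(t, tc.RestartServer(0))
}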
6 changes: 2 additions & 4 deletions pkg/cli/debug_recover_loss_of_quorum_test.go
@@ -462,7 +462,6 @@ func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) {
 					StickyEngineRegistry: storeReg,
 				},
 			},
-			Listener: listenerReg.MustGetOrCreate(t, i),
 			StoreSpecs: []base.StoreSpec{
 				{
 					InMemory: true,
@@ -471,8 +470,8 @@ func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) {
 		}
 	}
 	tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
-		ReusableListeners: true,
-		ServerArgsPerNode: sa,
+		ReusableListenerReg: listenerReg,
+		ServerArgsPerNode:   sa,
 	})
 	tc.Start(t)
 	s := sqlutils.MakeSQLRunner(tc.Conns[0])
@@ -555,7 +554,6 @@ func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) {
 	// NB: If recovery is not performed, server will just hang on startup.
 	// This is caused by liveness range becoming unavailable and preventing any
 	// progress. So it is likely that test will timeout if basic workflow fails.
-	require.NoError(t, listenerReg.MustGet(t, 0).Reopen())
 	require.NoError(t, tc.RestartServer(0), "restart failed")
 	s = sqlutils.MakeSQLRunner(tc.Conns[0])
 
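The deleted Reopen() call above captures the old contract: the test had to reopen the registry-backed listener itself before restarting a stopped server. With the registry handed to the cluster via ReusableListenerReg, a restart is a single call, roughly as in this hypothetical helper (not from the commit; standard CockroachDB test-util imports assumed):

package example_test

import (
	"testing"

	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
	"github.com/stretchr/testify/require"
)

// restartNode stops and restarts a node in a cluster that was started with
// ReusableListenerReg, relying on the registry to keep it on the same port.
func restartNode(t *testing.T, tc *testcluster.TestCluster, idx int) {
	t.Helper()
	tc.StopServer(idx)
	// Before this commit the test also had to run
	// listenerReg.MustGet(t, idx).Reopen() here.
	require.NoError(t, tc.RestartServer(idx), "restart failed")
}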
5 changes: 2 additions & 3 deletions pkg/kv/kvserver/client_store_test.go
@@ -129,11 +129,10 @@ func TestStoreLoadReplicaQuiescent(t *testing.T) {
 		kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, expOnly)
 
 		tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{
-			ReplicationMode:   base.ReplicationManual,
-			ReusableListeners: true,
+			ReplicationMode:     base.ReplicationManual,
+			ReusableListenerReg: listenerReg,
 			ServerArgs: base.TestServerArgs{
 				Settings: st,
-				Listener: listenerReg.MustGetOrCreate(t, 0),
 				RaftConfig: base.RaftConfig{
 					RaftTickInterval: 100 * time.Millisecond,
 				},
9 changes: 4 additions & 5 deletions pkg/kv/kvserver/loqrecovery/server_integration_test.go
@@ -630,7 +630,6 @@ func TestRetrieveApplyStatus(t *testing.T) {
 
 	for _, id := range planDetails.UpdatedNodes {
 		tc.StopServer(int(id.NodeID - 1))
-		require.NoError(t, lReg.MustGet(t, int(id.NodeID-1)).Reopen())
 		require.NoError(t, tc.RestartServer(int(id.NodeID-1)), "failed to restart node")
 	}
 
@@ -682,7 +681,6 @@ func TestRejectBadVersionApplication(t *testing.T) {
 
 	tc.StopServer(1)
 	require.NoError(t, pss[1].SavePlan(plan), "failed to inject plan into storage")
-	require.NoError(t, lReg.MustGet(t, 1).Reopen())
 	require.NoError(t, tc.RestartServer(1), "failed to restart server")
 
 	r, err := adm.RecoveryVerify(ctx, &serverpb.RecoveryVerifyRequest{})
@@ -705,6 +703,8 @@ func prepTestCluster(
 	*testcluster.TestCluster,
 	server.StickyInMemEnginesRegistry,
 	map[int]loqrecovery.PlanStore,
+	// TODO(during PR): no caller uses this now except to close it, so close it
+	// via stopper and don't return it.
 	*listenerutil.ListenerRegistry,
 ) {
 	skip.UnderStressRace(t, "cluster frequently fails to start under stress race")
@@ -714,8 +714,8 @@
 	lReg := listenerutil.NewListenerRegistry()
 
 	args := base.TestClusterArgs{
-		ServerArgsPerNode: make(map[int]base.TestServerArgs),
-		ReusableListeners: true,
+		ServerArgsPerNode:   make(map[int]base.TestServerArgs),
+		ReusableListenerReg: lReg,
 	}
 	for i := 0; i < nodes; i++ {
 		args.ServerArgsPerNode[i] = base.TestServerArgs{
@@ -733,7 +733,6 @@
 					StickyInMemoryEngineID: strconv.FormatInt(int64(i), 10),
 				},
 			},
-			Listener: lReg.MustGetOrCreate(t, i),
 		}
 	}
 	tc := testcluster.NewTestCluster(t, nodes, args)
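The TODO added above notes that callers only keep the returned registry in order to close it. One way that could be resolved, sketched below, is to tie the registry's lifetime to the cluster's stopper. This is not the commit's implementation; it assumes ListenerRegistry has a plain Close method and that stop.CloserFn can wrap it, as elsewhere in the test harness.

package example_test

import (
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/testutils/listenerutil"
	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
	"github.com/cockroachdb/cockroach/pkg/util/stop"
)

// prepClusterListeners is a hypothetical variant of prepTestCluster that does
// not return the listener registry to the caller.
func prepClusterListeners(t *testing.T, nodes int) *testcluster.TestCluster {
	lReg := listenerutil.NewListenerRegistry()
	args := base.TestClusterArgs{
		ServerArgsPerNode:   make(map[int]base.TestServerArgs),
		ReusableListenerReg: lReg,
	}
	tc := testcluster.NewTestCluster(t, nodes, args)
	// Hand ownership of the listeners to the cluster's stopper; callers no
	// longer need the registry just to close it.
	tc.Stopper().AddCloser(stop.CloserFn(lReg.Close))
	tc.Start(t)
	return tc
}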
4 changes: 2 additions & 2 deletions pkg/util/startup/startup_test.go
@@ -149,8 +149,8 @@ func runCircuitBreakerTestForKey(
 	kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false)
 
 	args := base.TestClusterArgs{
-		ServerArgsPerNode: make(map[int]base.TestServerArgs),
-		ReusableListeners: true,
+		ServerArgsPerNode:   make(map[int]base.TestServerArgs),
+		ReusableListenerReg: lReg,
 	}
 	var enableFaults atomic.Bool
 	for i := 0; i < nodes; i++ {
