From c319c093d8a803ca855f3299b2a153138b326ef6 Mon Sep 17 00:00:00 2001
From: Raphael 'kena' Poss
Date: Thu, 21 Sep 2023 17:42:19 +0200
Subject: [PATCH 1/2] sqlccl: fix a data race in TestTenantTempTableCleanup

Release note: None
---
 .../testccl/sqlccl/temp_table_clean_test.go   | 23 +++++++++++--------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/pkg/ccl/testccl/sqlccl/temp_table_clean_test.go b/pkg/ccl/testccl/sqlccl/temp_table_clean_test.go
index c10eb5ab1c7d..4e84ef97f22f 100644
--- a/pkg/ccl/testccl/sqlccl/temp_table_clean_test.go
+++ b/pkg/ccl/testccl/sqlccl/temp_table_clean_test.go
@@ -96,18 +96,21 @@ func TestTenantTempTableCleanup(t *testing.T) {
     )
     tenantStoppers := []*stop.Stopper{stop.NewStopper(), stop.NewStopper()}
-    tenantSettings := cluster.MakeTestingClusterSettings()
-    sql.TempObjectCleanupInterval.Override(ctx, &tenantSettings.SV, time.Second)
-    sql.TempObjectWaitInterval.Override(ctx, &tenantSettings.SV, time.Second*0)
-    // Set up sessions to expire within 5 seconds of a
-    // nodes death.
-    slinstance.DefaultTTL.Override(ctx, &tenantSettings.SV, 5*time.Second)
-    slinstance.DefaultHeartBeat.Override(ctx, &tenantSettings.SV, time.Second)
+    tenantSettings := func() *cluster.Settings {
+        st := cluster.MakeTestingClusterSettings()
+        sql.TempObjectCleanupInterval.Override(ctx, &st.SV, time.Second)
+        sql.TempObjectWaitInterval.Override(ctx, &st.SV, time.Second*0)
+        // Set up sessions to expire within 5 seconds of a
+        // nodes death.
+        slinstance.DefaultTTL.Override(ctx, &st.SV, 5*time.Second)
+        slinstance.DefaultHeartBeat.Override(ctx, &st.SV, time.Second)
+        return st
+    }
     _, tenantPrimaryDB := serverutils.StartTenant(t, tc.Server(0),
         base.TestTenantArgs{
             TenantID:     serverutils.TestTenantID(),
-            Settings:     tenantSettings,
+            Settings:     tenantSettings(),
             TestingKnobs: tenantTempKnobSettings,
             Stopper:      tenantStoppers[0],
         })
@@ -124,7 +127,7 @@ func TestTenantTempTableCleanup(t *testing.T) {
     _, tenantSecondDB := serverutils.StartTenant(t, tc.Server(1),
         base.TestTenantArgs{
             TenantID: serverutils.TestTenantID(),
-            Settings: tenantSettings,
+            Settings: tenantSettings(),
             Stopper:  tenantStoppers[1],
         })
     tenantSecondSQL := sqlutils.MakeSQLRunner(tenantSecondDB)
@@ -165,7 +168,7 @@ func TestTenantTempTableCleanup(t *testing.T) {
     _, tenantPrimaryDB = serverutils.StartTenant(t, tc.Server(0),
         base.TestTenantArgs{
             TenantID:     serverutils.TestTenantID(),
-            Settings:     tenantSettings,
+            Settings:     tenantSettings(),
             TestingKnobs: tenantTempKnobSettings,
             Stopper:      tenantStoppers[0]})
     tenantSQL = sqlutils.MakeSQLRunner(tenantPrimaryDB)

From e0b095b6949a4aa16e7b7932d5fc1fa86c73cfa3 Mon Sep 17 00:00:00 2001
From: Raphael 'kena' Poss
Date: Thu, 21 Sep 2023 17:53:38 +0200
Subject: [PATCH 2/2] settings: clarify mis-sharing of cluster.Settings

Prior to this patch, if a test incorrectly shared a `cluster.Settings`
object across servers, the misuse could surface as a data race report
rather than a clear error. We really want it to surface as an error.
This patch makes the accesses to the `classCheck` field atomic, so the
race detector stays quiet and the misuse is always reported as a clean
assertion error.
Release note: None
---
 pkg/settings/values.go | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/pkg/settings/values.go b/pkg/settings/values.go
index 3967730e1d72..09683e472b11 100644
--- a/pkg/settings/values.go
+++ b/pkg/settings/values.go
@@ -51,7 +51,7 @@ type Values struct {
     opaque interface{}
 }
 
-type classCheck int8
+type classCheck uint32
 
 const (
     // classCheckUndefined is used when the settings.Values hasn't been
@@ -67,6 +67,14 @@ const (
     classCheckVirtualCluster
 )
 
+func (ck *classCheck) get() classCheck {
+    return classCheck(atomic.LoadUint32((*uint32)(ck)))
+}
+
+func (ck *classCheck) set(nv classCheck) {
+    atomic.StoreUint32((*uint32)(ck), uint32(nv))
+}
+
 const numSlots = MaxSettings + 1
 
 type valuesContainer struct {
@@ -150,20 +158,20 @@ TIP: avoid using the same cluster.Settings or settings.Value object across multi
 // SpecializeForSystemInterface marks the values container as
 // pertaining to the system interface.
 func (sv *Values) SpecializeForSystemInterface() {
-    if sv.classCheck != classCheckUndefined && sv.classCheck != classCheckSystemInterface {
+    if ck := sv.classCheck.get(); ck != classCheckUndefined && ck != classCheckSystemInterface {
         panic(errors.AssertionFailedf(alreadySpecializedError))
     }
-    sv.classCheck = classCheckSystemInterface
+    sv.classCheck.set(classCheckSystemInterface)
 }
 
 // SpecializeForVirtualCluster marks this container as pertaining to
 // a virtual cluster, after which use of SystemOnly values is
 // disallowed.
 func (sv *Values) SpecializeForVirtualCluster() {
-    if sv.classCheck != classCheckUndefined && sv.classCheck != classCheckVirtualCluster {
+    if ck := sv.classCheck.get(); ck != classCheckUndefined && ck != classCheckVirtualCluster {
         panic(errors.AssertionFailedf(alreadySpecializedError))
     }
-    sv.classCheck = classCheckVirtualCluster
+    sv.classCheck.set(classCheckVirtualCluster)
     for slot, setting := range slotTable {
         if setting != nil && setting.Class() == SystemOnly {
             sv.container.forbidden[slot] = true
@@ -174,7 +182,7 @@ func (sv *Values) SpecializeForVirtualCluster() {
 // SpecializedToVirtualCluster returns true if this container is for a
 // virtual cluster (i.e. SpecializeToVirtualCluster() was called).
 func (sv *Values) SpecializedToVirtualCluster() bool {
-    return sv.classCheck == classCheckVirtualCluster
+    return sv.classCheck.get() == classCheckVirtualCluster
 }
 
 // Opaque returns the argument passed to Init.
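
Note: for readers outside the settings package, here is a minimal standalone sketch
(illustrative names only, not the CockroachDB API) of the pattern the second commit
applies: a small enum-like flag stored as a uint32 and accessed through sync/atomic,
so that concurrent specialization attempts never trip the race detector while a
genuinely conflicting specialization still surfaces as a panic.

// Standalone sketch of the pattern used in the second patch: an enum-like
// flag stored as a uint32 so that concurrent reads and writes go through
// sync/atomic. The race detector then stays quiet, and the logical misuse
// (specializing the same object two conflicting ways) is still reported as
// a clean panic. All names below are hypothetical stand-ins.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type classCheck uint32

const (
	classCheckUndefined classCheck = iota
	classCheckSystemInterface
	classCheckVirtualCluster
)

// get and set mirror the accessors added by the patch: load/store the
// underlying uint32 atomically instead of reading the field directly.
func (ck *classCheck) get() classCheck {
	return classCheck(atomic.LoadUint32((*uint32)(ck)))
}

func (ck *classCheck) set(nv classCheck) {
	atomic.StoreUint32((*uint32)(ck), uint32(nv))
}

// values stands in for a settings container shared (incorrectly or not)
// between servers.
type values struct {
	classCheck classCheck
}

// specializeForVirtualCluster panics if the object was already specialized
// differently, analogous to SpecializeForVirtualCluster in the patch.
func (v *values) specializeForVirtualCluster() {
	if ck := v.classCheck.get(); ck != classCheckUndefined && ck != classCheckVirtualCluster {
		panic(fmt.Sprintf("object already specialized as %d", ck))
	}
	v.classCheck.set(classCheckVirtualCluster)
}

func main() {
	v := &values{}
	// Two "servers" specializing the same object concurrently: with the
	// atomic accessors this is race-free under the race detector; a plain
	// field read/write here would be flagged as a data race instead of
	// producing a clear report of the misuse.
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v.specializeForVirtualCluster()
		}()
	}
	wg.Wait()
	fmt.Println("specialized:", v.classCheck.get() == classCheckVirtualCluster)
}

Run with the race detector enabled (go run -race), the sketch completes cleanly;
replacing the atomic accessors with plain field assignments would make the same
scenario show up as a data race report, which is the situation the patch avoids.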