multi-tenant: Bump uninitialized host cluster version
Previously, in cases where the version value in system.tenant_settings was
uninitialized, we assumed that the version was 22.1. With the change of the
build tag to 23.1, we need to adjust the assumed version to 22.1-76. This will
need one more bump when we finally mint the 22.2 version number in master.

Also renamed "host cluster" to "storage cluster" in a few places.

Resolves: #91589

Release note: None
ajstorm committed Nov 10, 2022
1 parent d6ae965 commit 24ee721
Showing 2 changed files with 22 additions and 22 deletions.
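
Note on the version numbers involved: the sketch below is a minimal, self-contained illustration (plain Go; the Version type and its Less/String methods are simplified stand-ins, not roachpb.Version) of how a (Major, Minor, Internal) tuple orders the old 22.1 fallback, the new 22.1-76 fallback, and the 23.1 build tag. Ordering is by Major, then Minor, then Internal, so 22.1 < 22.1-76 < 23.1; per the message above, the fallback gets one more bump once 22.2 is minted.

package main

import "fmt"

// Version is a simplified stand-in for a cluster version: a Major.Minor
// release, optionally followed by an internal (unstable) step that is
// printed as, e.g., 22.1-76.
type Version struct {
	Major, Minor, Internal int32
}

func (v Version) String() string {
	if v.Internal == 0 {
		return fmt.Sprintf("%d.%d", v.Major, v.Minor)
	}
	return fmt.Sprintf("%d.%d-%d", v.Major, v.Minor, v.Internal)
}

// Less orders versions by Major, then Minor, then Internal, which is why
// 22.1 < 22.1-76 < 23.1.
func (v Version) Less(o Version) bool {
	if v.Major != o.Major {
		return v.Major < o.Major
	}
	if v.Minor != o.Minor {
		return v.Minor < o.Minor
	}
	return v.Internal < o.Internal
}

func main() {
	oldFallback := Version{Major: 22, Minor: 1}               // assumption before this commit
	newFallback := Version{Major: 22, Minor: 1, Internal: 76} // assumption after this commit
	buildTag := Version{Major: 23, Minor: 1}                  // new build tag on master
	fmt.Println(oldFallback.Less(newFallback), newFallback.Less(buildTag)) // true true
	fmt.Println(oldFallback, newFallback, buildTag)                        // 22.1 22.1-76 23.1
}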
42 changes: 21 additions & 21 deletions pkg/server/settingswatcher/settings_watcher.go
@@ -51,10 +51,10 @@ type SettingsWatcher struct {
 	updater   settings.Updater
 	values    map[string]settingsValue
 	overrides map[string]settings.EncodedValue
-	// hostClusterVersion is the cache of the host cluster version
-	// inside secondary tenants. It will not be initialized in a system
+	// storageClusterVersion is the cache of the storage cluster version
+	// inside secondary tenants. It will be uninitialized in a system
 	// tenant.
-	hostClusterVersion clusterversion.ClusterVersion
+	storageClusterVersion clusterversion.ClusterVersion
 }

 // testingWatcherKnobs allows the client to inject testing knobs into
@@ -362,17 +362,17 @@ func (s *SettingsWatcher) updateOverrides(ctx context.Context) {
 		var newVersion clusterversion.ClusterVersion
 		if err := protoutil.Unmarshal([]byte(val.Value), &newVersion); err != nil {
 			log.Warningf(ctx, "ignoring invalid cluster version: %v - "+
-				"the lack of a refreshed host cluster version in a secondary tenant may prevent tenant upgrade", err)
+				"the lack of a refreshed storage cluster version in a secondary tenant may prevent tenant upgrade", err)
 		} else {
 			// We don't want to fully process the override in the case
 			// where we're dealing with the "version" setting, as we want
 			// the tenant to have full control over its version setting.
 			// Instead, we take the override value and cache it as the
-			// hostClusterVersion for use in determining if it's safe to
+			// storageClusterVersion for use in determining if it's safe to
 			// upgrade the tenant (since we don't want to upgrade tenants
-			// to a version that's beyond that of the host cluster).
-			log.Infof(ctx, "updating host cluster cached version from: %v to: %v", s.mu.hostClusterVersion, newVersion)
-			s.mu.hostClusterVersion = newVersion
+			// to a version that's beyond that of the storage cluster).
+			log.Infof(ctx, "updating storage cluster cached version from: %v to: %v", s.mu.storageClusterVersion, newVersion)
+			s.mu.storageClusterVersion = newVersion
 		}
 		continue
 	}
@@ -420,24 +420,24 @@ func (s *SettingsWatcher) IsOverridden(settingName string) bool {
 	return exists
 }

-// GetHostClusterVersion returns the host cluster version cached in the
-// SettingsWatcher. The host cluster version info in the settings watcher is
+// GetStorageClusterVersion returns the storage cluster version cached in the
+// SettingsWatcher. The storage cluster version info in the settings watcher is
 // populated by a cluster settings override sent from the system tenant to all
-// tenants, anytime the cluster version changes (or when a new cluster is
-// initialized in version 23.1 or later). In cases where the host cluster
+// tenants, anytime the storage cluster version changes (or when a new cluster
+// is initialized in version 23.1 or later). In cases where the storage cluster
 // version is not initialized, we assume that it's running version 22.2,
 // the last version which did not properly initialize this value.
-func (s *SettingsWatcher) GetHostClusterVersion(ctx context.Context) clusterversion.ClusterVersion {
+func (s *SettingsWatcher) GetStorageClusterVersion() clusterversion.ClusterVersion {
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	if s.mu.hostClusterVersion.Equal(clusterversion.ClusterVersion{Version: roachpb.Version{Major: 0, Minor: 0}}) {
-		// If the host cluster version is not initialized in the
-		// settingswatcher, it means that the cluster has not yet been upgraded
-		// to 23.1, or was initialized before version 23.1. As a result, assume
-		// that host cluster is at version 22.2.
+	if s.mu.storageClusterVersion.Equal(clusterversion.ClusterVersion{Version: roachpb.Version{Major: 0, Minor: 0}}) {
+		// If the storage cluster version is not initialized in the
+		// settingswatcher, it means that the storage cluster has not yet been
+		// upgraded to 23.1. As a result, assume that the storage cluster is at
+		// version 22.2.
 		// TODO(ajstorm): change this to 22.2 once the new version is minted.
-		hostClusterVersion := roachpb.Version{Major: 22, Minor: 1}
-		return clusterversion.ClusterVersion{Version: hostClusterVersion}
+		storageClusterVersion := roachpb.Version{Major: 22, Minor: 1, Internal: 76}
+		return clusterversion.ClusterVersion{Version: storageClusterVersion}
 	}
-	return s.mu.hostClusterVersion
+	return s.mu.storageClusterVersion
 }
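
As a reading aid, here is a simplified, self-contained restatement of the flow the two hunks above implement (the watcher type, applyVersionOverride, and getStorageClusterVersion below are stand-ins, not the real SettingsWatcher or clusterversion API): the system tenant's "version" override is cached rather than applied as an ordinary setting, and readers fall back to 22.1-76 until the first override arrives.

package main

import (
	"fmt"
	"sync"
)

// Version is a simplified stand-in for clusterversion.ClusterVersion.
type Version struct {
	Major, Minor, Internal int32
}

// watcher mirrors, in miniature, how the settings watcher caches the storage
// cluster version inside a secondary tenant.
type watcher struct {
	mu                    sync.Mutex
	storageClusterVersion Version
}

// applyVersionOverride caches the storage cluster version carried by the
// "version" setting override instead of applying it as a normal setting.
func (w *watcher) applyVersionOverride(v Version) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.storageClusterVersion = v
}

// getStorageClusterVersion returns the cached value, or the assumed
// pre-23.1 version (currently 22.1-76) if no override has arrived yet.
func (w *watcher) getStorageClusterVersion() Version {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.storageClusterVersion == (Version{}) {
		return Version{Major: 22, Minor: 1, Internal: 76}
	}
	return w.storageClusterVersion
}

func main() {
	w := &watcher{}
	fmt.Println(w.getStorageClusterVersion()) // {22 1 76}: fallback before any override
	w.applyVersionOverride(Version{Major: 23, Minor: 1})
	fmt.Println(w.getStorageClusterVersion()) // {23 1 0}: cached override wins
}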
2 changes: 1 addition & 1 deletion pkg/upgrade/upgrademanager/manager.go
@@ -117,7 +117,7 @@ func safeToUpgradeTenant(
 	if overrides == nil {
 		return false, errors.AssertionFailedf("overrides informer is nil in secondary tenant")
 	}
-	hostClusterVersion := overrides.(*settingswatcher.SettingsWatcher).GetHostClusterVersion(ctx)
+	hostClusterVersion := overrides.(*settingswatcher.SettingsWatcher).GetStorageClusterVersion()
 	if hostClusterVersion.Less(tenantClusterVersion.Version) {
 		// We assert here if we find a tenant with a higher cluster version than
 		// the host cluster. It's dangerous to run in this mode because the
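
The effect of the new fallback on this guard, sketched with the same kind of stand-ins (safeToUpgrade below is a simplified illustration, not the repo's safeToUpgradeTenant signature): the storage cluster version, or its 22.1-76 fallback, acts as a ceiling on the tenant's version, so a secondary tenant cannot move to 23.1 until the storage cluster has been upgraded.

package main

import "fmt"

// Version is again a simplified stand-in for a (Major, Minor, Internal)
// cluster version.
type Version struct {
	Major, Minor, Internal int32
}

func (v Version) Less(o Version) bool {
	if v.Major != o.Major {
		return v.Major < o.Major
	}
	if v.Minor != o.Minor {
		return v.Minor < o.Minor
	}
	return v.Internal < o.Internal
}

// safeToUpgrade mirrors the intent of the guard above: a tenant may only be
// at (or move to) a version that does not exceed the storage cluster's.
func safeToUpgrade(storageClusterVersion, tenantVersion Version) bool {
	return !storageClusterVersion.Less(tenantVersion)
}

func main() {
	fallback := Version{Major: 22, Minor: 1, Internal: 76} // uninitialized storage cluster
	target := Version{Major: 23, Minor: 1}                 // tenant wants the new build tag
	fmt.Println(safeToUpgrade(fallback, target))           // false: upgrade is blocked
	fmt.Println(safeToUpgrade(Version{Major: 23, Minor: 1}, target)) // true: storage cluster upgraded first
}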