From 86b2eec0dc9d5eda9a0fad8339fab313c25703d8 Mon Sep 17 00:00:00 2001
From: arulajmani
Date: Sun, 17 Jan 2021 21:01:54 -0500
Subject: [PATCH] sql: add support for dropping regions from multi-region databases

This patch builds upon the drop enum infrastructure to add the ability to drop regions from a multi-region database. It adds support for dropping any region that isn't the primary region; support for dropping the primary region is forthcoming.

When dropping a region, the multi-region enum entry is moved to the "READ_ONLY" capability. This is followed by validation in the type schema changer to ascertain that no table is using the region value being dropped in one of its rows. The database descriptor, which unfortunately duplicates the list of regions, also loses the region entry when the value is moved to "READ_ONLY". If validation is successful, the region is removed from the multi-region enum. If, however, validation is unsuccessful, the region value is moved back to `PUBLIC` and the entry is restored on the database descriptor.

Special behavior around REGIONAL BY TABLE tables, and the effect that has on type descriptor dependencies, is also worth calling out here. Previously, these tables did not capture their "implicit" bidirectional dependency with the multi-region type descriptor. The dependency is described as implicit because even though no column on the table uses the multi-region type descriptor to store the homing region, a value from the type descriptor is stored in the table's locality config. This dependency must be checked against when dropping regions or the type descriptor itself.

Before this patch, all dependencies a table had with any type descriptor could be constructed from the column descriptors on the table descriptor. As mentioned previously, this is no longer possible now that we have REGIONAL BY TABLE tables in the mix. As such, this requires modifying how type references are constructed for a table descriptor. In particular, if a table is a REGIONAL BY TABLE table homed in an explicit (non-primary) region, the multi-region enum ID is read off the database descriptor and added to the list of types referenced by the table descriptor.

Now that a particular locality config state may have this implicit dependency, switching locality patterns requires us to be cognizant of it. That is to say, when moving from a locality pattern that has a dependency on the multi-region type descriptor to one that does not, the back reference must be removed (and vice versa). As such, modifying the descriptor directly is discouraged. Instead, new methods of the form `alterTableDescLocality...` are provided which negotiate the dependency linking/unlinking appropriately.

Lastly, this patch also adds validation to ensure that every REGIONAL BY TABLE table that is homed in an explicit region stores a reference to the multi-region type descriptor. The validation semantics around REGIONAL BY TABLE tables and GLOBAL tables are also handled appropriately.

Informs #58333
Closes #57389

Release note (sql change): ALTER DATABASE ... DROP REGION is now implemented.
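As an illustrative sketch of the user-facing behavior (the database and region names below are hypothetical, and the exact error text may differ):

    CREATE DATABASE mr PRIMARY REGION "us-east1" REGIONS "us-west1", "europe-west1";
    -- Dropping a non-primary region succeeds once validation confirms that no
    -- REGIONAL BY TABLE table or REGIONAL BY ROW row is homed in it.
    ALTER DATABASE mr DROP REGION "us-west1";
    -- Dropping the primary region is rejected with a hint to designate another
    -- primary region (or remove all other regions) first.
    ALTER DATABASE mr DROP REGION "us-east1";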
--- docs/generated/eventlog.md | 23 + pkg/ccl/backupccl/backupbase/targets.go | 8 +- pkg/ccl/backupccl/restore_job.go | 21 +- pkg/sql/BUILD.bazel | 1 + pkg/sql/alter_database.go | 155 +++- pkg/sql/alter_table_locality.go | 138 ++- pkg/sql/alter_type.go | 45 +- pkg/sql/catalog/descpb/structured.pb.go | 130 +-- pkg/sql/catalog/descpb/structured.proto | 14 + pkg/sql/catalog/descriptor.go | 3 +- pkg/sql/catalog/tabledesc/structured.go | 194 +++- pkg/sql/catalog/typedesc/type_desc.go | 25 +- pkg/sql/create_table.go | 21 +- pkg/sql/descriptor.go | 14 +- pkg/sql/drop_type.go | 14 +- .../logictest/testdata/logic_test/alter_type | 26 +- .../testdata/logic_test/crdb_internal_tenant | 1 + .../logictest/testdata/logic_test/multiregion | 138 ++- pkg/sql/logictest/testdata/logic_test/notice | 2 +- pkg/sql/multiregion_test.go | 129 +++ pkg/sql/schema_changer.go | 62 +- pkg/sql/sem/tree/datum.go | 2 +- pkg/sql/type_change.go | 158 ++-- pkg/sql/walk.go | 1 + pkg/util/log/eventpb/ddl_events.pb.go | 858 ++++++++++++------ pkg/util/log/eventpb/ddl_events.proto | 10 + .../eventpb/eventlog_channels_generated.go | 3 + pkg/util/log/eventpb/json_encode_generated.go | 34 + 28 files changed, 1685 insertions(+), 545 deletions(-) create mode 100644 pkg/sql/multiregion_test.go diff --git a/docs/generated/eventlog.md b/docs/generated/eventlog.md index cf99e22862a8..6b23a96d872e 100644 --- a/docs/generated/eventlog.md +++ b/docs/generated/eventlog.md @@ -280,6 +280,29 @@ An event of type `alter_database_add_region` is recorded when a region is added | `RegionName` | The region being added. | yes | +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | yes | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_drop_region` + +AlterDatabaseDropRegion is recorded when a region is dropped from a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | yes | +| `RegionName` | The region being dropped. | yes | + + #### Common fields | Field | Description | Sensitive | diff --git a/pkg/ccl/backupccl/backupbase/targets.go b/pkg/ccl/backupccl/backupbase/targets.go index 1a59b95fabfc..c591d3113923 100644 --- a/pkg/ccl/backupccl/backupbase/targets.go +++ b/pkg/ccl/backupccl/backupbase/targets.go @@ -392,7 +392,9 @@ func DescriptorsMatchingTargets( return ret, err } // Get all the types used by this table. - typeIDs, err := tableDesc.GetAllReferencedTypeIDs(getTypeByID) + desc := resolver.DescByID[tableDesc.GetParentID()] + dbDesc := desc.(catalog.DatabaseDescriptor) + typeIDs, err := tableDesc.GetAllReferencedTypeIDs(dbDesc, getTypeByID) if err != nil { return ret, err } @@ -465,7 +467,9 @@ func DescriptorsMatchingTargets( } } // Get all the types used by this table.
- typeIDs, err := desc.GetAllReferencedTypeIDs(getTypeByID) + dbRaw := resolver.DescByID[desc.GetParentID()] + dbDesc := dbRaw.(catalog.DatabaseDescriptor) + typeIDs, err := desc.GetAllReferencedTypeIDs(dbDesc, getTypeByID) if err != nil { return ret, err } diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go index 0bd61e167b09..95418ceb7287 100644 --- a/pkg/ccl/backupccl/restore_job.go +++ b/pkg/ccl/backupccl/restore_job.go @@ -1066,7 +1066,15 @@ func createImportingDescriptors( // to the new tables being restored. for _, table := range mutableTables { // Collect all types used by this table. - typeIDs, err := table.GetAllReferencedTypeIDs(func(id descpb.ID) (catalog.TypeDescriptor, error) { + dbDesc, err := descsCol.GetImmutableDatabaseByID( + ctx, txn, table.GetParentID(), tree.DatabaseLookupFlags{ + AvoidCached: true, + IncludeOffline: true, + }) + if err != nil { + return err + } + typeIDs, err := table.GetAllReferencedTypeIDs(dbDesc, func(id descpb.ID) (catalog.TypeDescriptor, error) { return typesByID[id], nil }) if err != nil { @@ -1794,8 +1802,17 @@ func (r *restoreResumer) removeExistingTypeBackReferences( return typ, nil } + dbDesc, err := descsCol.GetImmutableDatabaseByID( + ctx, txn, tbl.GetParentID(), tree.DatabaseLookupFlags{ + AvoidCached: true, + IncludeOffline: true, + }) + if err != nil { + return err + } + // Get all types that this descriptor references. - referencedTypes, err := tbl.GetAllReferencedTypeIDs(lookup) + referencedTypes, err := tbl.GetAllReferencedTypeIDs(dbDesc, lookup) if err != nil { return err } diff --git a/pkg/sql/BUILD.bazel b/pkg/sql/BUILD.bazel index 6428fb8a8e47..675fd3358ac5 100644 --- a/pkg/sql/BUILD.bazel +++ b/pkg/sql/BUILD.bazel @@ -438,6 +438,7 @@ go_test( "materialized_view_test.go", "metric_test.go", "metric_util_test.go", + "multiregion_test.go", "mutation_test.go", "namespace_test.go", "old_foreign_key_desc_test.go", diff --git a/pkg/sql/alter_database.go b/pkg/sql/alter_database.go index 31861a80768b..e43317d8805e 100644 --- a/pkg/sql/alter_database.go +++ b/pkg/sql/alter_database.go @@ -25,7 +25,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/roleoption" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" "github.com/cockroachdb/errors" ) @@ -96,8 +95,8 @@ func (p *planner) checkCanAlterDatabaseAndSetNewOwner( privs := desc.GetPrivileges() privs.SetOwner(newOwner) - // Log Alter Database Owner event. This is an auditable log event and is recorded - // in the same transaction as the table descriptor update. + // Log Alter Database Owner event. This is an auditable log event and is + // recorded in the same transaction as the table descriptor update. return p.logEvent(ctx, desc.GetID(), &eventpb.AlterDatabaseOwner{ @@ -161,7 +160,7 @@ func (n *alterDatabaseAddRegionNode) startExec(params runParams) error { // Add the region to the database descriptor. This function validates that the region // we're adding is an active member of the cluster and isn't already present in the // RegionConfig. - if err := params.p.addRegionToRegionConfig(params.ctx, n.desc, n.n); err != nil { + if err := params.p.addActiveRegionToRegionConfig(params.ctx, n.desc, n.n); err != nil { return err } @@ -234,9 +233,9 @@ func (n *alterDatabaseAddRegionNode) startExec(params runParams) error { return err } - // Log Alter Database Add Region event. 
This is an auditable log event and is recorded - // in the same transaction as the database descriptor, type descriptor, and zone - // configuration updates. + // Log Alter Database Add Region event. This is an auditable log event and is + // recorded in the same transaction as the database descriptor, type + // descriptor, and zone configuration updates. return params.p.logEvent(params.ctx, n.desc.GetID(), &eventpb.AlterDatabaseAddRegion{ @@ -249,6 +248,11 @@ func (n *alterDatabaseAddRegionNode) Next(runParams) (bool, error) { return fals func (n *alterDatabaseAddRegionNode) Values() tree.Datums { return tree.Datums{} } func (n *alterDatabaseAddRegionNode) Close(context.Context) {} +type alterDatabaseDropRegionNode struct { + n *tree.AlterDatabaseDropRegion + desc *dbdesc.Mutable +} + // AlterDatabaseDropRegion transforms a tree.AlterDatabaseDropRegion into a plan node. func (p *planner) AlterDatabaseDropRegion( ctx context.Context, n *tree.AlterDatabaseDropRegion, @@ -260,9 +264,96 @@ func (p *planner) AlterDatabaseDropRegion( ); err != nil { return nil, err } - return nil, unimplemented.NewWithIssue(58333, "implementation pending") + _, dbDesc, err := p.Descriptors().GetMutableDatabaseByName(ctx, p.txn, n.Name.String(), + tree.DatabaseLookupFlags{Required: true}) + if err != nil { + return nil, err + } + + // To drop the region, the user has to have CREATEDB privileges, + // or be an admin user. + if err := p.CheckRoleOption(ctx, roleoption.CREATEDB); err != nil { + return nil, err + } + + if !dbDesc.IsMultiRegion() { + return nil, pgerror.New(pgcode.InvalidDatabaseDefinition, "database has no regions to drop") + } + + if dbDesc.RegionConfig.PrimaryRegion == descpb.RegionName(n.Region) { + return nil, errors.WithHintf( + errors.Newf("cannot drop region %q", dbDesc.RegionConfig.PrimaryRegion), + "You must designate another region as the primary region or remove all "+ + "other regions before attempting to drop region %q", n.Region, + ) + } + + return &alterDatabaseDropRegionNode{n, dbDesc}, nil +} + +func (n *alterDatabaseDropRegionNode) startExec(params runParams) error { + typeDesc, err := params.p.Descriptors().GetMutableTypeVersionByID( + params.ctx, + params.p.txn, + n.desc.RegionConfig.RegionEnumID, + ) + if err != nil { + return err + } + + // dropEnumValue tries to remove the region value from the multi-region type + // descriptor. Among other things, it validates that the region is not in + // use by any tables. A region is considered "in use" if either a REGIONAL BY + // TABLE table is explicitly homed in that region or a row in a REGIONAL BY + // ROW table is homed in that region. The type schema changer is responsible + // for all the requisite validation. + if err := params.p.dropEnumValue(params.ctx, typeDesc, tree.EnumValue(n.n.Region)); err != nil { + return err + } + + idx := 0 + found := false + for i, region := range n.desc.RegionConfig.Regions { + if region.Name == descpb.RegionName(n.n.Region) { + idx = i + found = true + break + } + } + if !found { + // This shouldn't happen and is simply a sanity check to ensure the database + // descriptor regions and multi-region enum regions are indeed consistent. + return errors.AssertionFailedf( + "attempting to drop region %s not on database descriptor %d but found on type descriptor", + n.n.Region, n.desc.GetID(), + ) + } + + n.desc.RegionConfig.Regions = append(n.desc.RegionConfig.Regions[:idx], + n.desc.RegionConfig.Regions[idx+1:]...) 
+ + if err := params.p.writeNonDropDatabaseChange( + params.ctx, + n.desc, + tree.AsStringWithFQNames(n.n, params.Ann()), + ); err != nil { + return err + } + + // Log Alter Database Drop Region event. This is an auditable log event and is + // recorded in the same transaction as the table descriptor update. + return params.p.logEvent(params.ctx, + n.desc.GetID(), + &eventpb.AlterDatabaseDropRegion{ + DatabaseName: n.desc.GetName(), + RegionName: n.n.Region.String(), + }) } +func (n *alterDatabaseDropRegionNode) Next(runParams) (bool, error) { return false, nil } +func (n *alterDatabaseDropRegionNode) Values() tree.Datums { return tree.Datums{} } +func (n *alterDatabaseDropRegionNode) Close(context.Context) {} + type alterDatabasePrimaryRegionNode struct { n *tree.AlterDatabasePrimaryRegion desc *dbdesc.Mutable @@ -359,12 +450,19 @@ func (n *alterDatabasePrimaryRegionNode) switchPrimaryRegion(params runParams) e return nil } -// addDefaultLocalityConfigToAllTables adds a locality config representing -// regional by table table's with affinity to the primary region to all table's -// inside the supplied database. +// addDefaultLocalityConfigToAllTables adds a default locality config to all +// tables inside the supplied database. The default locality config indicates +// that the table is a REGIONAL BY TABLE table homed in the primary region of +// the database. func addDefaultLocalityConfigToAllTables( - ctx context.Context, p *planner, desc *dbdesc.Immutable, + ctx context.Context, p *planner, desc *dbdesc.Immutable, regionEnumID descpb.ID, ) error { + if !desc.IsMultiRegion() { + return errors.AssertionFailedf( + "cannot add locality config to tables in non multi-region database with ID %d", + desc.GetID(), + ) + } b := p.Txn().NewBatch() if err := forEachTableDesc(ctx, p, desc, hideVirtual, func(immutable *dbdesc.Immutable, _ string, desc catalog.TableDescriptor) error { @@ -374,7 +472,13 @@ func addDefaultLocalityConfigToAllTables( if err != nil { return err } - mutDesc.SetTableLocalityRegionalByTable(tree.PrimaryRegionLocalityName) + + if err := p.alterTableDescLocalityToRegionalByTable( + ctx, tree.PrimaryRegionLocalityName, mutDesc, regionEnumID, + ); err != nil { + return err + } + if err := p.writeSchemaChangeToBatch(ctx, mutDesc, b); err != nil { return err } @@ -385,8 +489,8 @@ func addDefaultLocalityConfigToAllTables( return p.Txn().Run(ctx, b) } -// setInitialPrimaryRegion sets the primary region in cases where the database is already -// a multi-region database. +// setInitialPrimaryRegion sets the primary region in cases where the database +// is already a multi-region database. func (n *alterDatabasePrimaryRegionNode) setInitialPrimaryRegion(params runParams) error { // Create the region config structure to be added to the database descriptor. regionConfig, err := params.p.createRegionConfig( @@ -399,12 +503,19 @@ func (n *alterDatabasePrimaryRegionNode) setInitialPrimaryRegion(params runParam return err } - if err := addDefaultLocalityConfigToAllTables(params.ctx, params.p, &n.desc.Immutable); err != nil { + // Set the region config on the database descriptor. + n.desc.RegionConfig = regionConfig + + if err := addDefaultLocalityConfigToAllTables( + params.ctx, + params.p, + &n.desc.Immutable, + regionConfig.RegionEnumID, + ); err != nil { return err } // Write the modified database descriptor. 
- n.desc.RegionConfig = regionConfig if err := params.p.writeNonDropDatabaseChange( params.ctx, n.desc, @@ -453,8 +564,9 @@ func (n *alterDatabasePrimaryRegionNode) startExec(params runParams) error { } } - // Log Alter Database Primary Region event. This is an auditable log event and is recorded - // in the same transaction as the database descriptor, and zone configuration updates. + // Log Alter Database Primary Region event. This is an auditable log event and + // is recorded in the same transaction as the database descriptor, and zone + // configuration updates. return params.p.logEvent(params.ctx, n.desc.GetID(), &eventpb.AlterDatabasePrimaryRegion{ @@ -560,8 +672,9 @@ func (n *alterDatabaseSurvivalGoalNode) startExec(params runParams) error { return err } - // Log Alter Database Survival Goal event. This is an auditable log event and is recorded - // in the same transaction as the database descriptor, and zone configuration updates. + // Log Alter Database Survival Goal event. This is an auditable log event and + // is recorded in the same transaction as the database descriptor, and zone + // configuration updates. return params.p.logEvent(params.ctx, n.desc.GetID(), &eventpb.AlterDatabaseSurvivalGoal{ diff --git a/pkg/sql/alter_table_locality.go b/pkg/sql/alter_table_locality.go index c9ebe718e8d1..8ebfbff767cd 100644 --- a/pkg/sql/alter_table_locality.go +++ b/pkg/sql/alter_table_locality.go @@ -12,11 +12,14 @@ package sql import ( "context" + "fmt" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -112,7 +115,22 @@ func (n *alterTableSetLocalityNode) alterTableLocalityGlobalToRegionalByTable( ) } - n.tableDesc.SetTableLocalityRegionalByTable(n.n.Locality.TableRegion) + dbDesc, err := params.p.Descriptors().GetImmutableDatabaseByID( + params.ctx, params.p.txn, n.tableDesc.ParentID, tree.DatabaseLookupFlags{}) + if err != nil { + return err + } + + regionEnumID, err := dbDesc.MultiRegionEnumID() + if err != nil { + return err + } + + if err := params.p.alterTableDescLocalityToRegionalByTable( + params.ctx, n.n.Locality.TableRegion, n.tableDesc, regionEnumID, + ); err != nil { + return err + } // Finalize the alter by writing a new table descriptor and updating the zone // configuration. @@ -137,8 +155,14 @@ func (n *alterTableSetLocalityNode) alterTableLocalityRegionalByTableToGlobal( n.tableDesc.LocalityConfig, ) } - - n.tableDesc.SetTableLocalityGlobal() + regionEnumID, err := n.dbDesc.MultiRegionEnumID() + if err != nil { + return err + } + err = params.p.alterTableDescLocalityToGlobal(params.ctx, n.tableDesc, regionEnumID) + if err != nil { + return err + } // Finalize the alter by writing a new table descriptor and updating the zone // configuration. 
@@ -164,7 +188,22 @@ func (n *alterTableSetLocalityNode) alterTableLocalityRegionalByTableToRegionalB ) } - n.tableDesc.SetTableLocalityRegionalByTable(n.n.Locality.TableRegion) + dbDesc, err := params.p.Descriptors().GetImmutableDatabaseByID( + params.ctx, params.p.txn, n.tableDesc.ParentID, tree.DatabaseLookupFlags{}) + if err != nil { + return err + } + + regionEnumID, err := dbDesc.MultiRegionEnumID() + if err != nil { + return err + } + + if err := params.p.alterTableDescLocalityToRegionalByTable( + params.ctx, n.n.Locality.TableRegion, n.tableDesc, regionEnumID, + ); err != nil { + return err + } // Finalize the alter by writing a new table descriptor and updating the zone configuration. if err := n.validateAndWriteNewTableLocalityAndZoneConfig( @@ -439,3 +478,94 @@ func (n *alterTableSetLocalityNode) validateAndWriteNewTableLocalityAndZoneConfi return nil } + +// alterTableDescLocalityToRegionalByTable changes the locality of the given tableDesc +// to REGIONAL BY TABLE homed in the specified region. It also handles the +// dependency with the multi-region enum, if one exists. +func (p *planner) alterTableDescLocalityToRegionalByTable( + ctx context.Context, region tree.Name, tableDesc *tabledesc.Mutable, regionEnumID descpb.ID, +) error { + if tableDesc.GetMultiRegionEnumDependencyIfExists() { + if err := p.removeTypeBackReference(ctx, regionEnumID, tableDesc.GetID(), + fmt.Sprintf("remove back ref on mr-enum %d for table %d", regionEnumID, tableDesc.GetID()), + ); err != nil { + return err + } + } + tableDesc.SetTableLocalityRegionalByTable(region) + if tableDesc.GetMultiRegionEnumDependencyIfExists() { + return p.addTypeBackReference( + ctx, regionEnumID, tableDesc.ID, + fmt.Sprintf("add back ref on mr-enum %d for table %d", regionEnumID, tableDesc.GetID()), + ) + } + return nil +} + +// alterTableDescLocalityToGlobal changes the locality of the given tableDesc to +// global. It also removes the dependency on the multi-region enum, if it +// existed before the locality switch. +func (p *planner) alterTableDescLocalityToGlobal( + ctx context.Context, tableDesc *tabledesc.Mutable, regionEnumID descpb.ID, +) error { + if tableDesc.GetMultiRegionEnumDependencyIfExists() { + if err := p.removeTypeBackReference(ctx, regionEnumID, tableDesc.GetID(), + fmt.Sprintf("remove back ref on mr-enum %d for table %d", regionEnumID, tableDesc.GetID()), + ); err != nil { + return err + } + } + tableDesc.SetTableLocalityGlobal() + return nil +} + +// setNewLocalityConfig sets the locality config of the given table descriptor to +// the provided config. It also unlinks the table's dependency on the multi-region +// enum if one existed before the switch, and links it if the new config requires one. +func setNewLocalityConfig( + ctx context.Context, + desc *tabledesc.Mutable, + txn *kv.Txn, + b *kv.Batch, + config descpb.TableDescriptor_LocalityConfig, + kvTrace bool, + descsCol *descs.Collection, +) error { + getMultiRegionTypeDesc := func() (*typedesc.Mutable, error) { + dbDesc, err := descsCol.GetImmutableDatabaseByID(ctx, txn, desc.GetParentID(), tree.DatabaseLookupFlags{}) + if err != nil { + return nil, err + } + + regionEnumID, err := dbDesc.MultiRegionEnumID() + if err != nil { + return nil, err + } + return descsCol.GetMutableTypeVersionByID(ctx, txn, regionEnumID) + } + // If there was a dependency on the multi-region enum before the + // new locality is set, we must unlink the dependency.
+ if desc.GetMultiRegionEnumDependencyIfExists() { + typ, err := getMultiRegionTypeDesc() + if err != nil { + return err + } + typ.RemoveReferencingDescriptorID(desc.GetID()) + if err := descsCol.WriteDescToBatch(ctx, kvTrace, typ, b); err != nil { + return err + } + } + desc.LocalityConfig = &config + // If there is a dependency after the new locality is set, we must add it. + if desc.GetMultiRegionEnumDependencyIfExists() { + typ, err := getMultiRegionTypeDesc() + if err != nil { + return err + } + typ.AddReferencingDescriptorID(desc.GetID()) + if err := descsCol.WriteDescToBatch(ctx, kvTrace, typ, b); err != nil { + return err + } + } + return nil +} diff --git a/pkg/sql/alter_type.go b/pkg/sql/alter_type.go index a7987a3e5dfe..be6399ef6ab6 100644 --- a/pkg/sql/alter_type.go +++ b/pkg/sql/alter_type.go @@ -114,10 +114,10 @@ func (n *alterTypeNode) startExec(params runParams) error { case *tree.AlterTypeDropValue: if params.p.SessionData().SafeUpdates { err = pgerror.DangerousStatementf( - "DROP VALUE is may cause view/default/computed expressions to stop working if the " + - "enum label is used inside them") + "DROP VALUE may cause view/default/computed expressions to stop working if the " + + "enum value is used inside them") } else { - err = params.p.dropEnumValue(params.ctx, n, t) + err = params.p.dropEnumValue(params.ctx, n.desc, t.Val) } default: err = errors.AssertionFailedf("unknown alter type cmd %s", t) @@ -145,7 +145,7 @@ func (n *alterTypeNode) startExec(params runParams) error { return nil } -func findMemberByName( +func findEnumMemberByName( desc *typedesc.Mutable, val tree.EnumValue, ) (bool, *descpb.TypeDescriptor_EnumMember) { for _, member := range desc.EnumMembers { @@ -164,20 +164,20 @@ func (p *planner) addEnumValue( return pgerror.Newf(pgcode.WrongObjectType, "%q is not an enum", desc.Name) } // See if the value already exists in the enum or not. 
- found, member := findMemberByName(desc, node.NewVal) + found, member := findEnumMemberByName(desc, node.NewVal) if found { if enumMemberIsRemoving(member) { return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, - "enum label %q is being dropped, try again later", node.NewVal) + "enum value %q is being dropped, try again later", node.NewVal) } if node.IfNotExists { p.BufferClientNotice( ctx, - pgnotice.Newf("enum label %q already exists, skipping", node.NewVal), + pgnotice.Newf("enum value %q already exists, skipping", node.NewVal), ) return nil } - return pgerror.Newf(pgcode.DuplicateObject, "enum label %q already exists", node.NewVal) + return pgerror.Newf(pgcode.DuplicateObject, "enum value %q already exists", node.NewVal) } if err := desc.AddEnumValue(node); err != nil { @@ -187,28 +187,29 @@ func (p *planner) addEnumValue( } func (p *planner) dropEnumValue( - ctx context.Context, n *alterTypeNode, node *tree.AlterTypeDropValue, + ctx context.Context, desc *typedesc.Mutable, val tree.EnumValue, ) error { - if n.desc.Kind != descpb.TypeDescriptor_ENUM { - return pgerror.Newf(pgcode.WrongObjectType, "%q is not an enum", n.desc.Name) + if desc.Kind != descpb.TypeDescriptor_ENUM && + desc.Kind != descpb.TypeDescriptor_MULTIREGION_ENUM { + return pgerror.Newf(pgcode.WrongObjectType, "%q is not an enum", desc.Name) } - found, member := findMemberByName(n.desc, node.Val) + found, member := findEnumMemberByName(desc, val) if !found { - return pgerror.Newf(pgcode.UndefinedObject, "enum label %q does not exist", node.Val) + return pgerror.Newf(pgcode.UndefinedObject, "enum value %q does not exist", val) } - // Do not allow drops if the enum label isn't public yet. + // Do not allow drops if the enum value isn't public yet. if enumMemberIsRemoving(member) { return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, - "enum label %q is already being dropped", node.Val) + "enum value %q is already being dropped", val) } if enumMemberIsAdding(member) { return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, - "enum label %q is being added, try again later", node.Val) + "enum value %q is being added, try again later", val) } - n.desc.DropEnumValue(node.Val) - return p.writeTypeSchemaChange(ctx, n.desc, tree.AsStringWithFQNames(n.n, p.Ann())) + desc.DropEnumValue(val) + return p.writeTypeSchemaChange(ctx, desc, desc.Name) } func (p *planner) renameType(ctx context.Context, n *alterTypeNode, newName string) error { @@ -320,23 +321,23 @@ func (p *planner) renameTypeValue( enumMemberIndex = i } else if member.LogicalRepresentation == newVal { return pgerror.Newf(pgcode.DuplicateObject, - "enum label %s already exists", newVal) + "enum value %s already exists", newVal) } } // An enum member with the name oldVal was not found. 
if enumMemberIndex == -1 { return pgerror.Newf(pgcode.InvalidParameterValue, - "%s is not an existing enum label", oldVal) + "%s is not an existing enum value", oldVal) } if enumMemberIsRemoving(&n.desc.EnumMembers[enumMemberIndex]) { return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, - "enum label %q is being dropped", oldVal) + "enum value %q is being dropped", oldVal) } if enumMemberIsAdding(&n.desc.EnumMembers[enumMemberIndex]) { return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, - "enum label %q is being added, try again later", oldVal) + "enum value %q is being added, try again later", oldVal) } diff --git a/pkg/sql/catalog/descpb/structured.pb.go b/pkg/sql/catalog/descpb/structured.pb.go index d60431f259ed..16365941b895 100644 --- a/pkg/sql/catalog/descpb/structured.pb.go +++ b/pkg/sql/catalog/descpb/structured.pb.go @@ -75,7 +75,7 @@ func (x *ConstraintValidity) UnmarshalJSON(data []byte) error { return nil } func (ConstraintValidity) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{0} } // SystemColumnKind is an enum representing the different kind of system @@ -120,7 +120,7 @@ func (x *SystemColumnKind) UnmarshalJSON(data []byte) error { return nil } func (SystemColumnKind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{1} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{1} } // State indicates whether a descriptor is public (i.e., normally visible, @@ -172,7 +172,7 @@ func (x *DescriptorState) UnmarshalJSON(data []byte) error { return nil } func (DescriptorState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{2} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{2} } // SurvivalGoal is the survival goal for a database. @@ -211,7 +211,7 @@ func (x *SurvivalGoal) UnmarshalJSON(data []byte) error { return nil } func (SurvivalGoal) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{3} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{3} } type ForeignKeyReference_Action int32 @@ -256,7 +256,7 @@ func (x *ForeignKeyReference_Action) UnmarshalJSON(data []byte) error { return nil } func (ForeignKeyReference_Action) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{0, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{0, 0} } // Match is the algorithm used to compare composite keys. @@ -296,7 +296,7 @@ func (x *ForeignKeyReference_Match) UnmarshalJSON(data []byte) error { return nil } func (ForeignKeyReference_Match) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{0, 1} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{0, 1} } // The direction of a column in the index. @@ -333,7 +333,7 @@ func (x *IndexDescriptor_Direction) UnmarshalJSON(data []byte) error { return nil } func (IndexDescriptor_Direction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{8, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{8, 0} } // The type of the index. 
@@ -370,7 +370,7 @@ func (x *IndexDescriptor_Type) UnmarshalJSON(data []byte) error { return nil } func (IndexDescriptor_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{8, 1} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{8, 1} } type ConstraintToUpdate_ConstraintType int32 @@ -416,7 +416,7 @@ func (x *ConstraintToUpdate_ConstraintType) UnmarshalJSON(data []byte) error { return nil } func (ConstraintToUpdate_ConstraintType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{9, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{9, 0} } // A descriptor within a mutation is unavailable for reads, writes @@ -481,7 +481,7 @@ func (x *DescriptorMutation_State) UnmarshalJSON(data []byte) error { return nil } func (DescriptorMutation_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{13, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{13, 0} } // Direction of mutation. @@ -524,7 +524,7 @@ func (x *DescriptorMutation_Direction) UnmarshalJSON(data []byte) error { return nil } func (DescriptorMutation_Direction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{13, 1} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{13, 1} } // AuditMode indicates which auditing actions to take when this table is used. @@ -561,7 +561,7 @@ func (x *TableDescriptor_AuditMode) UnmarshalJSON(data []byte) error { return nil } func (TableDescriptor_AuditMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{15, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{15, 0} } // Represents the kind of type that this type descriptor represents. @@ -606,7 +606,7 @@ func (x *TypeDescriptor_Kind) UnmarshalJSON(data []byte) error { return nil } func (TypeDescriptor_Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{17, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{17, 0} } // Represents what operations are allowed on this ENUM member. 
@@ -647,7 +647,7 @@ func (x *TypeDescriptor_EnumMember_Capability) UnmarshalJSON(data []byte) error return nil } func (TypeDescriptor_EnumMember_Capability) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{17, 0, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{17, 0, 0} } type TypeDescriptor_EnumMember_Direction int32 @@ -689,7 +689,7 @@ func (x *TypeDescriptor_EnumMember_Direction) UnmarshalJSON(data []byte) error { return nil } func (TypeDescriptor_EnumMember_Direction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{17, 0, 1} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{17, 0, 1} } // ForeignKeyReference is deprecated, replaced by ForeignKeyConstraint in v19.2 @@ -719,7 +719,7 @@ func (m *ForeignKeyReference) Reset() { *m = ForeignKeyReference{} } func (m *ForeignKeyReference) String() string { return proto.CompactTextString(m) } func (*ForeignKeyReference) ProtoMessage() {} func (*ForeignKeyReference) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{0} } func (m *ForeignKeyReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -767,7 +767,7 @@ func (m *ForeignKeyConstraint) Reset() { *m = ForeignKeyConstraint{} } func (m *ForeignKeyConstraint) String() string { return proto.CompactTextString(m) } func (*ForeignKeyConstraint) ProtoMessage() {} func (*ForeignKeyConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{1} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{1} } func (m *ForeignKeyConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -805,7 +805,7 @@ func (m *UniqueWithoutIndexConstraint) Reset() { *m = UniqueWithoutIndex func (m *UniqueWithoutIndexConstraint) String() string { return proto.CompactTextString(m) } func (*UniqueWithoutIndexConstraint) ProtoMessage() {} func (*UniqueWithoutIndexConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{2} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{2} } func (m *UniqueWithoutIndexConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -873,7 +873,7 @@ func (m *ColumnDescriptor) Reset() { *m = ColumnDescriptor{} } func (m *ColumnDescriptor) String() string { return proto.CompactTextString(m) } func (*ColumnDescriptor) ProtoMessage() {} func (*ColumnDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{3} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{3} } func (m *ColumnDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -929,7 +929,7 @@ func (m *ColumnFamilyDescriptor) Reset() { *m = ColumnFamilyDescriptor{} func (m *ColumnFamilyDescriptor) String() string { return proto.CompactTextString(m) } func (*ColumnFamilyDescriptor) ProtoMessage() {} func (*ColumnFamilyDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{4} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{4} } func (m *ColumnFamilyDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -975,7 +975,7 @@ func (m *InterleaveDescriptor) Reset() { *m = InterleaveDescriptor{} } func (m *InterleaveDescriptor) String() string { return proto.CompactTextString(m) } func (*InterleaveDescriptor) ProtoMessage() {} func (*InterleaveDescriptor) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{5} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{5} } func (m *InterleaveDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1019,7 +1019,7 @@ func (m *InterleaveDescriptor_Ancestor) Reset() { *m = InterleaveDescrip func (m *InterleaveDescriptor_Ancestor) String() string { return proto.CompactTextString(m) } func (*InterleaveDescriptor_Ancestor) ProtoMessage() {} func (*InterleaveDescriptor_Ancestor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{5, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{5, 0} } func (m *InterleaveDescriptor_Ancestor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1074,7 +1074,7 @@ func (m *ShardedDescriptor) Reset() { *m = ShardedDescriptor{} } func (m *ShardedDescriptor) String() string { return proto.CompactTextString(m) } func (*ShardedDescriptor) ProtoMessage() {} func (*ShardedDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{6} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{6} } func (m *ShardedDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1125,7 +1125,7 @@ func (m *PartitioningDescriptor) Reset() { *m = PartitioningDescriptor{} func (m *PartitioningDescriptor) String() string { return proto.CompactTextString(m) } func (*PartitioningDescriptor) ProtoMessage() {} func (*PartitioningDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{7} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{7} } func (m *PartitioningDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1168,7 +1168,7 @@ func (m *PartitioningDescriptor_List) Reset() { *m = PartitioningDescrip func (m *PartitioningDescriptor_List) String() string { return proto.CompactTextString(m) } func (*PartitioningDescriptor_List) ProtoMessage() {} func (*PartitioningDescriptor_List) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{7, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{7, 0} } func (m *PartitioningDescriptor_List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1213,7 +1213,7 @@ func (m *PartitioningDescriptor_Range) Reset() { *m = PartitioningDescri func (m *PartitioningDescriptor_Range) String() string { return proto.CompactTextString(m) } func (*PartitioningDescriptor_Range) ProtoMessage() {} func (*PartitioningDescriptor_Range) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{7, 1} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{7, 1} } func (m *PartitioningDescriptor_Range) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1380,7 +1380,7 @@ func (m *IndexDescriptor) Reset() { *m = IndexDescriptor{} } func (m *IndexDescriptor) String() string { return proto.CompactTextString(m) } func (*IndexDescriptor) ProtoMessage() {} func (*IndexDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{8} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{8} } func (m *IndexDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1432,7 +1432,7 @@ func (m *ConstraintToUpdate) Reset() { *m = ConstraintToUpdate{} } func (m *ConstraintToUpdate) String() string { return proto.CompactTextString(m) } func (*ConstraintToUpdate) ProtoMessage() {} func (*ConstraintToUpdate) Descriptor() ([]byte, []int) { - 
return fileDescriptor_structured_f072fa47ebd451a1, []int{9} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{9} } func (m *ConstraintToUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1483,7 +1483,7 @@ func (m *PrimaryKeySwap) Reset() { *m = PrimaryKeySwap{} } func (m *PrimaryKeySwap) String() string { return proto.CompactTextString(m) } func (*PrimaryKeySwap) ProtoMessage() {} func (*PrimaryKeySwap) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{10} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{10} } func (m *PrimaryKeySwap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1517,7 +1517,7 @@ func (m *PrimaryKeySwap_LocalityConfigSwap) Reset() { *m = PrimaryKeySwa func (m *PrimaryKeySwap_LocalityConfigSwap) String() string { return proto.CompactTextString(m) } func (*PrimaryKeySwap_LocalityConfigSwap) ProtoMessage() {} func (*PrimaryKeySwap_LocalityConfigSwap) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{10, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{10, 0} } func (m *PrimaryKeySwap_LocalityConfigSwap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1557,7 +1557,7 @@ func (m *ComputedColumnSwap) Reset() { *m = ComputedColumnSwap{} } func (m *ComputedColumnSwap) String() string { return proto.CompactTextString(m) } func (*ComputedColumnSwap) ProtoMessage() {} func (*ComputedColumnSwap) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{11} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{11} } func (m *ComputedColumnSwap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1605,7 +1605,7 @@ func (m *MaterializedViewRefresh) Reset() { *m = MaterializedViewRefresh func (m *MaterializedViewRefresh) String() string { return proto.CompactTextString(m) } func (*MaterializedViewRefresh) ProtoMessage() {} func (*MaterializedViewRefresh) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{12} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{12} } func (m *MaterializedViewRefresh) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1662,7 +1662,7 @@ func (m *DescriptorMutation) Reset() { *m = DescriptorMutation{} } func (m *DescriptorMutation) String() string { return proto.CompactTextString(m) } func (*DescriptorMutation) ProtoMessage() {} func (*DescriptorMutation) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{13} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{13} } func (m *DescriptorMutation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2000,7 +2000,7 @@ func (m *NameInfo) Reset() { *m = NameInfo{} } func (m *NameInfo) String() string { return proto.CompactTextString(m) } func (*NameInfo) ProtoMessage() {} func (*NameInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{14} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{14} } func (m *NameInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2190,7 +2190,7 @@ func (m *TableDescriptor) Reset() { *m = TableDescriptor{} } func (m *TableDescriptor) String() string { return proto.CompactTextString(m) } func (*TableDescriptor) ProtoMessage() {} func (*TableDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{15} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{15} } func (m *TableDescriptor) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2518,7 +2518,7 @@ func (m *TableDescriptor_SchemaChangeLease) Reset() { *m = TableDescript func (m *TableDescriptor_SchemaChangeLease) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_SchemaChangeLease) ProtoMessage() {} func (*TableDescriptor_SchemaChangeLease) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{15, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{15, 0} } func (m *TableDescriptor_SchemaChangeLease) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2564,7 +2564,7 @@ func (m *TableDescriptor_CheckConstraint) Reset() { *m = TableDescriptor func (m *TableDescriptor_CheckConstraint) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_CheckConstraint) ProtoMessage() {} func (*TableDescriptor_CheckConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{15, 1} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{15, 1} } func (m *TableDescriptor_CheckConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2604,7 +2604,7 @@ func (m *TableDescriptor_Reference) Reset() { *m = TableDescriptor_Refer func (m *TableDescriptor_Reference) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_Reference) ProtoMessage() {} func (*TableDescriptor_Reference) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{15, 2} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{15, 2} } func (m *TableDescriptor_Reference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2641,7 +2641,7 @@ func (m *TableDescriptor_MutationJob) Reset() { *m = TableDescriptor_Mut func (m *TableDescriptor_MutationJob) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_MutationJob) ProtoMessage() {} func (*TableDescriptor_MutationJob) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{15, 3} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{15, 3} } func (m *TableDescriptor_MutationJob) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,7 +2684,7 @@ func (m *TableDescriptor_SequenceOpts) Reset() { *m = TableDescriptor_Se func (m *TableDescriptor_SequenceOpts) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_SequenceOpts) ProtoMessage() {} func (*TableDescriptor_SequenceOpts) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{15, 4} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{15, 4} } func (m *TableDescriptor_SequenceOpts) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2724,7 +2724,7 @@ func (m *TableDescriptor_SequenceOpts_SequenceOwner) String() string { } func (*TableDescriptor_SequenceOpts_SequenceOwner) ProtoMessage() {} func (*TableDescriptor_SequenceOpts_SequenceOwner) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{15, 4, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{15, 4, 0} } func (m *TableDescriptor_SequenceOpts_SequenceOwner) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2764,7 +2764,7 @@ func (m *TableDescriptor_Replacement) Reset() { *m = TableDescriptor_Rep func (m *TableDescriptor_Replacement) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_Replacement) ProtoMessage() {} func (*TableDescriptor_Replacement) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{15, 5} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{15, 5} } func (m *TableDescriptor_Replacement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2801,7 +2801,7 @@ func (m *TableDescriptor_GCDescriptorMutation) Reset() { *m = TableDescr func (m *TableDescriptor_GCDescriptorMutation) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_GCDescriptorMutation) ProtoMessage() {} func (*TableDescriptor_GCDescriptorMutation) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{15, 6} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{15, 6} } func (m *TableDescriptor_GCDescriptorMutation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2838,7 +2838,7 @@ func (m *TableDescriptor_LocalityConfig) Reset() { *m = TableDescriptor_ func (m *TableDescriptor_LocalityConfig) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_LocalityConfig) ProtoMessage() {} func (*TableDescriptor_LocalityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{15, 7} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{15, 7} } func (m *TableDescriptor_LocalityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3005,6 +3005,20 @@ func _TableDescriptor_LocalityConfig_OneofSizer(msg proto.Message) (n int) { return n } +// REGIONAL BY TABLE tables have an "implicit" bidirectional dependency with +// the multi-region enum. The dependency is described "implicit" because +// even though no column on the table uses the multi-region type descriptor +// to store the homing region, a value from the type descriptor is stored in +// the locality config below (when the table is homed in the non-primary +// region). +// This changes how type dependencies are constructed for table descriptors. +// After the introduction of REGIONAL BY TABLE tables, a column on the table +// descriptor using a type is no longer a necessary (note it is still a +// sufficient) condition to establish a type dependency. As is the case with +// adding and dropping columns, this type dependency must be negotiated. As +// such, switching locality patterns or adding new locality configs must be +// done so that back references to the multi-region type descriptor are +// kept sane. type TableDescriptor_LocalityConfig_RegionalByTable struct { // Region is set if the table has an affinity with a non-primary region. 
Region *RegionName `protobuf:"bytes,1,opt,name=region,casttype=RegionName" json:"region,omitempty"` @@ -3018,7 +3032,7 @@ func (m *TableDescriptor_LocalityConfig_RegionalByTable) String() string { } func (*TableDescriptor_LocalityConfig_RegionalByTable) ProtoMessage() {} func (*TableDescriptor_LocalityConfig_RegionalByTable) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{15, 7, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{15, 7, 0} } func (m *TableDescriptor_LocalityConfig_RegionalByTable) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3056,7 +3070,7 @@ func (m *TableDescriptor_LocalityConfig_RegionalByRow) String() string { } func (*TableDescriptor_LocalityConfig_RegionalByRow) ProtoMessage() {} func (*TableDescriptor_LocalityConfig_RegionalByRow) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{15, 7, 1} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{15, 7, 1} } func (m *TableDescriptor_LocalityConfig_RegionalByRow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3088,7 +3102,7 @@ func (m *TableDescriptor_LocalityConfig_Global) Reset() { *m = TableDesc func (m *TableDescriptor_LocalityConfig_Global) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_LocalityConfig_Global) ProtoMessage() {} func (*TableDescriptor_LocalityConfig_Global) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{15, 7, 2} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{15, 7, 2} } func (m *TableDescriptor_LocalityConfig_Global) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3139,7 +3153,7 @@ func (m *DatabaseDescriptor) Reset() { *m = DatabaseDescriptor{} } func (m *DatabaseDescriptor) String() string { return proto.CompactTextString(m) } func (*DatabaseDescriptor) ProtoMessage() {} func (*DatabaseDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{16} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{16} } func (m *DatabaseDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3247,7 +3261,7 @@ func (m *DatabaseDescriptor_SchemaInfo) Reset() { *m = DatabaseDescripto func (m *DatabaseDescriptor_SchemaInfo) String() string { return proto.CompactTextString(m) } func (*DatabaseDescriptor_SchemaInfo) ProtoMessage() {} func (*DatabaseDescriptor_SchemaInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{16, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{16, 0} } func (m *DatabaseDescriptor_SchemaInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3287,7 +3301,7 @@ func (m *DatabaseDescriptor_RegionConfig) Reset() { *m = DatabaseDescrip func (m *DatabaseDescriptor_RegionConfig) String() string { return proto.CompactTextString(m) } func (*DatabaseDescriptor_RegionConfig) ProtoMessage() {} func (*DatabaseDescriptor_RegionConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{16, 2} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{16, 2} } func (m *DatabaseDescriptor_RegionConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3322,7 +3336,7 @@ func (m *DatabaseDescriptor_RegionConfig_Region) Reset() { func (m *DatabaseDescriptor_RegionConfig_Region) String() string { return proto.CompactTextString(m) } func (*DatabaseDescriptor_RegionConfig_Region) ProtoMessage() {} func 
(*DatabaseDescriptor_RegionConfig_Region) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{16, 2, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{16, 2, 0} } func (m *DatabaseDescriptor_RegionConfig_Region) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3385,7 +3399,7 @@ func (m *TypeDescriptor) Reset() { *m = TypeDescriptor{} } func (m *TypeDescriptor) String() string { return proto.CompactTextString(m) } func (*TypeDescriptor) ProtoMessage() {} func (*TypeDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{17} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{17} } func (m *TypeDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3534,7 +3548,7 @@ func (m *TypeDescriptor_EnumMember) Reset() { *m = TypeDescriptor_EnumMe func (m *TypeDescriptor_EnumMember) String() string { return proto.CompactTextString(m) } func (*TypeDescriptor_EnumMember) ProtoMessage() {} func (*TypeDescriptor_EnumMember) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{17, 0} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{17, 0} } func (m *TypeDescriptor_EnumMember) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3570,7 +3584,7 @@ func (m *TypeDescriptor_RegionConfig) Reset() { *m = TypeDescriptor_Regi func (m *TypeDescriptor_RegionConfig) String() string { return proto.CompactTextString(m) } func (*TypeDescriptor_RegionConfig) ProtoMessage() {} func (*TypeDescriptor_RegionConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{17, 1} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{17, 1} } func (m *TypeDescriptor_RegionConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3618,7 +3632,7 @@ func (m *SchemaDescriptor) Reset() { *m = SchemaDescriptor{} } func (m *SchemaDescriptor) String() string { return proto.CompactTextString(m) } func (*SchemaDescriptor) ProtoMessage() {} func (*SchemaDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{18} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{18} } func (m *SchemaDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3721,7 +3735,7 @@ func (m *Descriptor) Reset() { *m = Descriptor{} } func (m *Descriptor) String() string { return proto.CompactTextString(m) } func (*Descriptor) ProtoMessage() {} func (*Descriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_f072fa47ebd451a1, []int{19} + return fileDescriptor_structured_1c1d8ad700ca3188, []int{19} } func (m *Descriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -18571,10 +18585,10 @@ var ( ) func init() { - proto.RegisterFile("sql/catalog/descpb/structured.proto", fileDescriptor_structured_f072fa47ebd451a1) + proto.RegisterFile("sql/catalog/descpb/structured.proto", fileDescriptor_structured_1c1d8ad700ca3188) } -var fileDescriptor_structured_f072fa47ebd451a1 = []byte{ +var fileDescriptor_structured_1c1d8ad700ca3188 = []byte{ // 5256 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x3c, 0xcb, 0x6f, 0x23, 0xe7, 0x7d, 0xe2, 0x9b, 0xfc, 0xf1, 0x35, 0xfa, 0x56, 0xbb, 0x4b, 0x2b, 0xb6, 0xa4, 0xe5, 0x7a, 0x6d, diff --git a/pkg/sql/catalog/descpb/structured.proto b/pkg/sql/catalog/descpb/structured.proto index c5e5b8e3b7f2..1e9c7e5d7307 100644 --- a/pkg/sql/catalog/descpb/structured.proto +++ 
b/pkg/sql/catalog/descpb/structured.proto @@ -1096,6 +1096,20 @@ message TableDescriptor { message LocalityConfig { option (gogoproto.equal) = true; + // REGIONAL BY TABLE tables have an "implicit" bidirectional dependency with + // the multi-region enum. The dependency is described "implicit" because + // even though no column on the table uses the multi-region type descriptor + // to store the homing region, a value from the type descriptor is stored in + // the locality config below (when the table is homed in the non-primary + // region). + // This changes how type dependencies are constructed for table descriptors. + // After the introduction of REGIONAL BY TABLE tables, a column on the table + // descriptor using a type is no longer a necessary (note it is still a + // sufficient) condition to establish a type dependency. As is the case with + // adding and dropping columns, this type dependency must be negotiated. As + // such, switching locality patterns or adding new locality configs must be + // done so that back references to the multi-region type descriptor are + // kept sane. message RegionalByTable { option (gogoproto.equal) = true; // Region is set if the table has an affinity with a non-primary region. diff --git a/pkg/sql/catalog/descriptor.go b/pkg/sql/catalog/descriptor.go index 8bd156c3514a..06813c8e47ae 100644 --- a/pkg/sql/catalog/descriptor.go +++ b/pkg/sql/catalog/descriptor.go @@ -240,7 +240,7 @@ type TableDescriptor interface { GetReplacementOf() descpb.TableDescriptor_Replacement GetAllReferencedTypeIDs( - getType func(descpb.ID) (TypeDescriptor, error), + databaseDesc DatabaseDescriptor, getType func(descpb.ID) (TypeDescriptor, error), ) (descpb.IDs, error) Validate(ctx context.Context, txn DescGetter) error @@ -264,6 +264,7 @@ type TableDescriptor interface { IsLocalityRegionalByRow() bool IsLocalityRegionalByTable() bool IsLocalityGlobal() bool + GetRegionalByTableRegion() (descpb.RegionName, error) } // Index is an interface around the index descriptor types. diff --git a/pkg/sql/catalog/tabledesc/structured.go b/pkg/sql/catalog/tabledesc/structured.go index 84ca778044e1..d2b01b29fe1b 100644 --- a/pkg/sql/catalog/tabledesc/structured.go +++ b/pkg/sql/catalog/tabledesc/structured.go @@ -787,8 +787,47 @@ func ForEachExprStringInTableDesc(descI catalog.TableDescriptor, f func(expr *st // this table references. It takes in a function that returns the TypeDescriptor // with the desired ID. func (desc *wrapper) GetAllReferencedTypeIDs( - getType func(descpb.ID) (catalog.TypeDescriptor, error), + dbDesc catalog.DatabaseDescriptor, getType func(descpb.ID) (catalog.TypeDescriptor, error), ) (descpb.IDs, error) { + ids, err := desc.getAllReferencedTypesInTableColumns(getType) + if err != nil { + return nil, err + } + + // REGIONAL BY TABLE tables may have a dependency with the multi-region enum. + exists := desc.GetMultiRegionEnumDependencyIfExists() + if exists { + regionEnumID, err := dbDesc.MultiRegionEnumID() + if err != nil { + return nil, err + } + ids[regionEnumID] = struct{}{} + } + + // Construct the output. + result := make(descpb.IDs, 0, len(ids)) + for id := range ids { + result = append(result, id) + } + + // Sort the output so that the order is deterministic. + sort.Sort(result) + return result, nil +} + +// getAllReferencedTypesInTableColumns returns a map of all user defined +// type descriptor IDs that this table references. 
Consider using +// GetAllReferencedTypeIDs when constructing the list of type descriptor IDs +// referenced by a table -- being used by a column is a sufficient but not +// necessary condition for a table to reference a type. +// One example of a table having a type descriptor dependency but no column to +// show for it is a REGIONAL BY TABLE table (homed in the non-primary region). +// These use a value from the multi-region enum to denote the homing region, but +// do so in the locality config as opposed to through a column. +// GetAllReferencedTypesByID accounts for this dependency. +func (desc *wrapper) getAllReferencedTypesInTableColumns( + getType func(descpb.ID) (catalog.TypeDescriptor, error), +) (map[descpb.ID]struct{}, error) { // All serialized expressions within a table descriptor are serialized // with type annotations as ID's, so this visitor will collect them all. visitor := &tree.TypeCollectorVisitor{ @@ -836,14 +875,7 @@ func (desc *wrapper) GetAllReferencedTypeIDs( } } - // Construct the output. - result := make(descpb.IDs, 0, len(ids)) - for id := range ids { - result = append(result, id) - } - // Sort the output so that the order is deterministic. - sort.Sort(result) - return result, nil + return ids, nil } func (desc *Mutable) initIDs() { @@ -1555,23 +1587,18 @@ func (desc *wrapper) validateCrossReferences(ctx context.Context, dg catalog.Des } // TODO(dan): Also validate SharedPrefixLen in the interleaves. - // Validate the all types present in the descriptor exist. typeMap caches - // accesses to TypeDescriptors, and is wrapped by getType. - // TODO(ajwerner): generalize this to a cached implementation of the - // DescGetter. - typeMap := make(map[descpb.ID]catalog.TypeDescriptor) - getType := func(id descpb.ID) (catalog.TypeDescriptor, error) { - if typeDesc, ok := typeMap[id]; ok { - return typeDesc, nil - } - typeDesc, err := catalog.GetTypeDescFromID(ctx, dg, id) - if err != nil { - return nil, errors.Wrapf(err, "type ID %d in descriptor not found", id) - } - typeMap[id] = typeDesc - return typeDesc, nil + // Validate the all types present in the descriptor exist. + getType := getTypeGetter(ctx, dg) + parentDesc, err := dg.GetDesc(ctx, desc.ParentID) + if err != nil { + return err + } + dbDesc, isDB := parentDesc.(catalog.DatabaseDescriptor) + if !isDB { + return errors.AssertionFailedf("parent id %d is not a database", dbDesc.GetID()) } - typeIDs, err := desc.GetAllReferencedTypeIDs(getType) + + typeIDs, err := desc.GetAllReferencedTypeIDs(dbDesc, getType) if err != nil { return err } @@ -1668,13 +1695,47 @@ func (desc *wrapper) ValidateTableLocalityConfig(ctx context.Context, dg catalog errors.Safe(regionsEnumID)) } + // REGIONAL BY TABLE tables homed in the primary region should include a + // reference to the multi-region type descriptor and a corresponding + // backreference. All other patterns should only contain a reference if there + // is an explicit column which uses the multi-region type descriptor as its + // *types.T. While the specific cases are validated below, we search for the + // region enum ID in the references list just once, up top here. 
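// In rough terms, the per-locality expectations validated below are:
//   - GLOBAL: the multi-region enum may only be referenced if some column on
//     the table actually uses the multi-region type.
//   - REGIONAL BY ROW: PartitionAllBy must be set.
//   - REGIONAL BY TABLE homed in an explicit (non-primary) region: the region
//     must be a member of the multi-region enum, and the enum ID must appear
//     in the table's referenced type IDs.
//   - REGIONAL BY TABLE homed in the primary region: like GLOBAL, a reference
//     is only expected when a column uses the multi-region type.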
+ getTypes := getTypeGetter(ctx, dg) + typeIDs, err := desc.GetAllReferencedTypeIDs(db, getTypes) + if err != nil { + return err + } + regionEnumIDReferenced := false + for _, typeID := range typeIDs { + if typeID == regionsEnumID { + regionEnumIDReferenced = true + break + } + } + columnTypesTypeIDs, err := desc.getAllReferencedTypesInTableColumns(getTypes) + if err != nil { + return err + } switch lc := desc.LocalityConfig.Locality.(type) { case *descpb.TableDescriptor_LocalityConfig_Global_: + if regionEnumIDReferenced { + if _, found := columnTypesTypeIDs[regionsEnumID]; !found { + return errors.AssertionFailedf( + "expected no region Enum ID to be referenced by a GLOBAL TABLE: %q"+ + " but found: %d", + desc.GetName(), + regionsEnumDesc.GetID(), + ) + } + } case *descpb.TableDescriptor_LocalityConfig_RegionalByRow_: if !desc.IsPartitionAllBy() { return errors.AssertionFailedf("expected REGIONAL BY ROW table to have PartitionAllBy set") } case *descpb.TableDescriptor_LocalityConfig_RegionalByTable_: + + // Table is homed in an explicit (non-primary) region. if lc.RegionalByTable.Region != nil { foundRegion := false regions, err := regionsEnumDesc.RegionNames() @@ -1699,6 +1760,28 @@ func (desc *wrapper) ValidateTableLocalityConfig(ctx context.Context, dg catalog strings.Join(regions.ToStrings(), ", "), ) } + if !regionEnumIDReferenced { + return errors.AssertionFailedf( + "expected multi-region enum ID %d to be referenced on REGIONAL BY TABLE: %q locality "+ + "config, but did not find it", + regionsEnumID, + desc.GetName(), + ) + } + } else { + if regionEnumIDReferenced { + // It may be the case that the multi-region type descriptor is used + // as the type of the table column. Validations should only fail if + // that is not the case. + if _, found := columnTypesTypeIDs[regionsEnumID]; !found { + return errors.AssertionFailedf( + "expected no region Enum ID to be referenced by a REGIONAL BY TABLE: %q homed in the "+ + "primary region, but found: %d", + desc.GetName(), + regionsEnumDesc.GetID(), + ) + } + } } default: return pgerror.Newf( @@ -1710,6 +1793,26 @@ func (desc *wrapper) ValidateTableLocalityConfig(ctx context.Context, dg catalog return nil } +func getTypeGetter( + ctx context.Context, dg catalog.DescGetter, +) func(descpb.ID) (catalog.TypeDescriptor, error) { + // typeMap caches accesses to TypeDescriptors, and is wrapped by getType. + // TODO(ajwerner): generalize this to a cached implementation of the + // DescGetter. + typeMap := make(map[descpb.ID]catalog.TypeDescriptor) + return func(id descpb.ID) (catalog.TypeDescriptor, error) { + if typeDesc, ok := typeMap[id]; ok { + return typeDesc, nil + } + typeDesc, err := catalog.GetTypeDescFromID(ctx, dg, id) + if err != nil { + return nil, errors.Wrapf(err, "type ID %d in descriptor not found", id) + } + typeMap[id] = typeDesc + return typeDesc, nil + } +} + // ValidateIndexNameIsUnique validates that the index name does not exist. func (desc *wrapper) ValidateIndexNameIsUnique(indexName string) error { if catalog.FindNonDropIndex(desc, func(idx catalog.Index) bool { @@ -3888,10 +3991,41 @@ func (desc *wrapper) IsLocalityGlobal() bool { return desc.LocalityConfig.GetGlobal() != nil } +// GetRegionalTableRegion returns the region a REGIONAL BY TABLE table is +// homed in. 
+func (desc *wrapper) GetRegionalByTableRegion() (descpb.RegionName, error) { + if !desc.IsLocalityRegionalByTable() { + return "", errors.New("is not REGIONAL BY TABLE") + } + region := desc.LocalityConfig.GetRegionalByTable().Region + if region == nil { + return descpb.RegionName(tree.PrimaryRegionLocalityName), nil + } + return *region, nil +} + +// GetMultiRegionEnumDependency returns true if the given table has an "implicit" +// dependency on the multi-region enum. An implicit dependency exists for +// REGIONAL BY TABLE table's which are homed in an explicit region +// (i.e non-primary region). Even though these tables don't have a column +// denoting their locality, their region config uses a value from the +// multi-region enum. As such, any drop validation or locality switches must +// honor this implicit dependency. +func (desc *wrapper) GetMultiRegionEnumDependencyIfExists() bool { + if desc.IsLocalityRegionalByTable() { + regionName, _ := desc.GetRegionalByTableRegion() + return regionName != descpb.RegionName(tree.PrimaryRegionLocalityName) + } + return false +} + // SetTableLocalityRegionalByTable sets the descriptor's locality config to // regional at the table level in the supplied region. An empty region name -// (or its alias PrimaryRegionLocalityName) denotes that the table has affinity -// to the primary region. +// (or its alias PrimaryRegionLocalityName) denotes that the table is homed in +// the primary region. +// SetTableLocalityRegionalByTable doesn't account for the locality config that +// was previously set on the descriptor. Instead, you may want to use: +// (planner) alterTableDescLocalityToRegionalByTable. func (desc *Mutable) SetTableLocalityRegionalByTable(region tree.Name) { lc := LocalityConfigRegionalByTable(region) desc.LocalityConfig = &lc @@ -3912,6 +4046,9 @@ func LocalityConfigRegionalByTable(region tree.Name) descpb.TableDescriptor_Loca // SetTableLocalityRegionalByRow sets the descriptor's locality config to // regional at the row level. An empty regionColName denotes the default // crdb_region partitioning column. +// SetTableLocalityRegionalByRow doesn't account for the locality config that +// was previously set on the descriptor, and the dependency unlinking that it +// entails. func (desc *Mutable) SetTableLocalityRegionalByRow(regionColName tree.Name) { lc := LocalityConfigRegionalByRow(regionColName) desc.LocalityConfig = &lc @@ -3932,6 +4069,9 @@ func LocalityConfigRegionalByRow(regionColName tree.Name) descpb.TableDescriptor // SetTableLocalityGlobal sets the descriptor's locality config to a global // table. +// SetLocalityGlobal doesn't account for the locality config that was previously +// set on the descriptor. Instead, you may want to use +// (planner) alterTableDescLocalityToGlobal. 
func (desc *Mutable) SetTableLocalityGlobal() { lc := LocalityConfigGlobal() desc.LocalityConfig = &lc diff --git a/pkg/sql/catalog/typedesc/type_desc.go b/pkg/sql/catalog/typedesc/type_desc.go index 90540d9c33d5..88ed46ef5379 100644 --- a/pkg/sql/catalog/typedesc/type_desc.go +++ b/pkg/sql/catalog/typedesc/type_desc.go @@ -347,7 +347,7 @@ func (desc *Mutable) AddEnumValue(node *tree.AlterTypeAddValue) error { } } if foundIndex == -1 { - return pgerror.Newf(pgcode.InvalidParameterValue, "%q is not an existing enum label", existing) + return pgerror.Newf(pgcode.InvalidParameterValue, "%q is not an existing enum value", existing) } pos = foundIndex @@ -429,6 +429,11 @@ func (e EnumMembers) Less(i, j int) bool { } func (e EnumMembers) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func isBeingDropped(member *descpb.TypeDescriptor_EnumMember) bool { + return member.Capability == descpb.TypeDescriptor_EnumMember_READ_ONLY && + member.Direction == descpb.TypeDescriptor_EnumMember_REMOVE +} + // Validate performs validation on the TypeDescriptor. func (desc *Immutable) Validate(ctx context.Context, dg catalog.DescGetter) error { // Validate local properties of the descriptor. @@ -469,7 +474,7 @@ func (desc *Immutable) Validate(ctx context.Context, dg catalog.DescGetter) erro return errors.AssertionFailedf("duplicate enum physical rep %v", desc.EnumMembers[i].PhysicalRepresentation) } } - // Ensure there are no duplicate enum labels. + // Ensure there are no duplicate enum values. members := make(map[string]struct{}, len(desc.EnumMembers)) for i := range desc.EnumMembers { _, ok := members[desc.EnumMembers[i].LogicalRepresentation] @@ -560,7 +565,14 @@ func (desc *Immutable) Validate(ctx context.Context, dg catalog.DescGetter) erro return err } - if len(desc.EnumMembers) != len(dbRegions) { + // Count the number of regions that aren't being dropped. + numRegions := 0 + for _, member := range desc.EnumMembers { + if !isBeingDropped(&member) { + numRegions++ + } + } + if numRegions != len(dbRegions) { return errors.AssertionFailedf( "unexpected number of regions on db desc: %d expected %d", len(dbRegions), len(desc.EnumMembers)) @@ -571,8 +583,11 @@ func (desc *Immutable) Validate(ctx context.Context, dg catalog.DescGetter) erro regions[region] = struct{}{} } - for i := range desc.EnumMembers { - enumRegion := descpb.RegionName(desc.EnumMembers[i].LogicalRepresentation) + for _, member := range desc.EnumMembers { + if isBeingDropped(&member) { + continue + } + enumRegion := descpb.RegionName(member.LogicalRepresentation) if _, ok := regions[enumRegion]; !ok { return errors.AssertionFailedf("did not find %q region on database descriptor", enumRegion) } diff --git a/pkg/sql/create_table.go b/pkg/sql/create_table.go index 629475e50632..9425574451a1 100644 --- a/pkg/sql/create_table.go +++ b/pkg/sql/create_table.go @@ -383,6 +383,24 @@ func (n *createTableNode) startExec(params runParams) error { ); err != nil { return err } + // Save the reference on the multi-region enum if there is a dependency with + // the descriptor. 
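// GetMultiRegionEnumDependencyIfExists only returns true for REGIONAL BY TABLE
// tables homed in an explicit (non-primary) region, so GLOBAL tables and tables
// homed in the primary region do not pick up a back reference on the
// multi-region enum here.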
+ if desc.GetMultiRegionEnumDependencyIfExists() { + typeDesc, err := params.p.Descriptors().GetMutableTypeVersionByID( + params.ctx, + params.p.txn, + dbDesc.RegionConfig.RegionEnumID, + ) + if err != nil { + return errors.Wrap(err, "error resolving multi-region enum") + } + typeDesc.AddReferencingDescriptorID(desc.ID) + err = params.p.writeTypeSchemaChange( + params.ctx, typeDesc, "add REGIONAL BY TABLE back reference") + if err != nil { + return errors.Wrap(err, "error adding backreference to multi-region enum") + } + } } dg := catalogkv.NewOneLevelUncachedDescGetter(params.p.txn, params.ExecCfg().Codec) @@ -1424,8 +1442,7 @@ func NewTableDesc( vt resolver.SchemaResolver, st *cluster.Settings, n *tree.CreateTable, - parentID, parentSchemaID, id descpb.ID, - regionEnumID descpb.ID, + parentID, parentSchemaID, id, regionEnumID descpb.ID, creationTime hlc.Timestamp, privileges *descpb.PrivilegeDescriptor, affected map[descpb.ID]*tabledesc.Mutable, diff --git a/pkg/sql/descriptor.go b/pkg/sql/descriptor.go index d3ac262916d5..deded748f3dc 100644 --- a/pkg/sql/descriptor.go +++ b/pkg/sql/descriptor.go @@ -258,9 +258,9 @@ func validateDatabaseRegionConfig(regionConfig descpb.DatabaseDescriptor_RegionC return nil } -// addRegionToRegionConfig adds the supplied region to the RegionConfig in the -// supplied database descriptor. -func (p *planner) addRegionToRegionConfig( +// addActiveRegionToRegionConfig adds the supplied region to the RegionConfig in +// the supplied database descriptor if the region is currently active. +func (p *planner) addActiveRegionToRegionConfig( ctx context.Context, desc *dbdesc.Mutable, regionToAdd *tree.AlterDatabaseAddRegion, ) error { liveRegions, err := p.getLiveClusterRegions(ctx) @@ -268,13 +268,16 @@ func (p *planner) addRegionToRegionConfig( return err } - regionConfig := desc.RegionConfig - // Ensure that the region we're adding is currently active. region := descpb.RegionName(regionToAdd.Region) if err := checkLiveClusterRegion(liveRegions, region); err != nil { return err } + return addRegionToRegionConfig(desc, region) +} + +func addRegionToRegionConfig(desc *dbdesc.Mutable, region descpb.RegionName) error { + regionConfig := desc.RegionConfig // Ensure that the region doesn't already exist in the database. for _, r := range regionConfig.Regions { @@ -305,6 +308,7 @@ func (p *planner) addRegionToRegionConfig( } return nil + } // createRegionConfig creates a new region config from the given parameters. 
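For reference, the call sites adapted in the hunks that follow (drop_type.go, schema_changer.go) all take the same shape: resolve the table's parent database descriptor, then hand it to the new two-argument GetAllReferencedTypeIDs. A minimal illustrative sketch of that shape is below; it is not part of the patch, and lookupDesc/lookupType are hypothetical stand-ins for whatever descriptor resolution the caller already has (a descs.Collection, a DescGetter, ...).

package sketch // hypothetical package, for illustration only

import (
	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/errors"
)

func referencedTypeIDsForTable(
	tbl catalog.TableDescriptor,
	lookupDesc func(descpb.ID) (catalog.Descriptor, error),
	lookupType func(descpb.ID) (catalog.TypeDescriptor, error),
) (descpb.IDs, error) {
	// The parent database descriptor is now required so that a REGIONAL BY
	// TABLE table homed in an explicit region can contribute the multi-region
	// enum ID even though no column on the table uses the enum.
	parent, err := lookupDesc(tbl.GetParentID())
	if err != nil {
		return nil, err
	}
	dbDesc, ok := parent.(catalog.DatabaseDescriptor)
	if !ok {
		return nil, errors.AssertionFailedf(
			"parent %d of %q is not a database", tbl.GetParentID(), tbl.GetName())
	}
	return tbl.GetAllReferencedTypeIDs(dbDesc, lookupType)
}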
diff --git a/pkg/sql/drop_type.go b/pkg/sql/drop_type.go index 377773826c76..cf72dfba7bd3 100644 --- a/pkg/sql/drop_type.go +++ b/pkg/sql/drop_type.go @@ -194,7 +194,12 @@ func (p *planner) removeTypeBackReference( func (p *planner) addBackRefsFromAllTypesInTable( ctx context.Context, desc *tabledesc.Mutable, ) error { - typeIDs, err := desc.GetAllReferencedTypeIDs(func(id descpb.ID) (catalog.TypeDescriptor, error) { + dbDesc, err := p.Descriptors().GetImmutableDatabaseByID( + ctx, p.txn, desc.GetParentID(), tree.DatabaseLookupFlags{}) + if err != nil { + return err + } + typeIDs, err := desc.GetAllReferencedTypeIDs(dbDesc, func(id descpb.ID) (catalog.TypeDescriptor, error) { mutDesc, err := p.Descriptors().GetMutableTypeVersionByID(ctx, p.txn, id) if err != nil { return nil, err @@ -216,7 +221,12 @@ func (p *planner) addBackRefsFromAllTypesInTable( func (p *planner) removeBackRefsFromAllTypesInTable( ctx context.Context, desc *tabledesc.Mutable, ) error { - typeIDs, err := desc.GetAllReferencedTypeIDs(func(id descpb.ID) (catalog.TypeDescriptor, error) { + dbDesc, err := p.Descriptors().GetImmutableDatabaseByID( + ctx, p.txn, desc.GetParentID(), tree.DatabaseLookupFlags{}) + if err != nil { + return err + } + typeIDs, err := desc.GetAllReferencedTypeIDs(dbDesc, func(id descpb.ID) (catalog.TypeDescriptor, error) { mutDesc, err := p.Descriptors().GetMutableTypeVersionByID(ctx, p.txn, id) if err != nil { return nil, err diff --git a/pkg/sql/logictest/testdata/logic_test/alter_type b/pkg/sql/logictest/testdata/logic_test/alter_type index 41504ff00431..225ea7a498ae 100644 --- a/pkg/sql/logictest/testdata/logic_test/alter_type +++ b/pkg/sql/logictest/testdata/logic_test/alter_type @@ -82,11 +82,11 @@ statement ok CREATE TYPE names AS ENUM ('james', 'johnny') # Cannot rename a value to a value that already a member of the type. -statement error enum label johnny already exists +statement error enum value johnny already exists ALTER TYPE names RENAME VALUE 'james' TO 'johnny' # Cannot rename a value that is not a member of the type. -statement error jim is not an existing enum label +statement error jim is not an existing enum value ALTER TYPE names RENAME VALUE 'jim' TO 'jimmy' statement ok @@ -142,13 +142,13 @@ SELECT enum_range('c'::build) {c} # Test some error cases. 
-statement error pq: enum label \"c\" already exists +statement error pq: enum value \"c\" already exists ALTER TYPE build ADD VALUE 'c' -statement error pq: \"b\" is not an existing enum label +statement error pq: \"b\" is not an existing enum value ALTER TYPE build ADD VALUE 'a' BEFORE 'b' -statement error pq: \"b\" is not an existing enum label +statement error pq: \"b\" is not an existing enum value ALTER TYPE build ADD VALUE 'a' AFTER 'b' statement ok @@ -219,7 +219,7 @@ SELECT enum_last('c'::build) ---- f -statement error pq: enum label \"g\" is not yet public +statement error pq: enum value \"g\" is not yet public INSERT INTO new_enum_values VALUES ('g') statement ok @@ -371,7 +371,7 @@ statement ok CREATE VIEW v as SELECT 'd':::alphabets; CREATE TABLE uses_alphabets_2(k INT PRIMARY KEY, v alphabets DEFAULT 'e'); -statement error pq: could not validate enum value removal for "d": count-value-usage: enum label "d" is not yet public +statement error pq: could not validate removal of enum value "d": count-value-usage: enum value "d" is not yet public ALTER TYPE alphabets DROP VALUE 'd' statement ok @@ -408,7 +408,7 @@ subtest add_drop_same_value_in_txn statement ok CREATE TYPE a AS ENUM('a') -statement error enum label "b" is being added, try again later +statement error enum value "b" is being added, try again later BEGIN; ALTER TYPE a ADD VALUE 'b'; ALTER TYPE a DROP VALUE 'b'; @@ -416,7 +416,7 @@ ALTER TYPE a DROP VALUE 'b'; statement ok ROLLBACK -statement error enum label "a" is being dropped, try again later +statement error enum value "a" is being dropped, try again later BEGIN; ALTER TYPE a DROP VALUE 'a'; ALTER TYPE a ADD VALUE 'a'; @@ -425,7 +425,7 @@ statement ok ROLLBACK -statement error pq: enum label "a" is already being dropped +statement error pq: enum value "a" is already being dropped BEGIN; ALTER TYPE a DROP VALUE 'a'; ALTER TYPE a DROP VALUE 'a' @@ -435,7 +435,7 @@ ROLLBACK subtest if_not_exists_in_same_txn -statement error enum label "a" is being dropped, try again later +statement error enum value "a" is being dropped, try again later BEGIN; ALTER TYPE a DROP VALUE 'a'; ALTER TYPE a ADD VALUE IF NOT EXISTS 'a'; @@ -451,7 +451,7 @@ COMMIT subtest add_rename_in_same_txn -statement error enum label "c" is being added, try again later +statement error enum value "c" is being added, try again later BEGIN; ALTER TYPE a ADD VALUE 'c'; ALTER TYPE a RENAME VALUE 'c' TO 'new_name'; @@ -461,7 +461,7 @@ ROLLBACK subtest drop_rename_in_same_txn -statement error enum label "a" is being dropped +statement error enum value "a" is being dropped BEGIN; ALTER TYPE a DROP VALUE 'a'; ALTER TYPE a RENAME VALUE 'a' TO 'new_name'; diff --git a/pkg/sql/logictest/testdata/logic_test/crdb_internal_tenant b/pkg/sql/logictest/testdata/logic_test/crdb_internal_tenant index db7df068d49e..2d4b9d49e710 100644 --- a/pkg/sql/logictest/testdata/logic_test/crdb_internal_tenant +++ b/pkg/sql/logictest/testdata/logic_test/crdb_internal_tenant @@ -10,6 +10,7 @@ SELECT node_id, name FROM crdb_internal.leases ORDER BY name ---- 0 role_members 0 role_options +0 system 0 test 0 users diff --git a/pkg/sql/logictest/testdata/logic_test/multiregion b/pkg/sql/logictest/testdata/logic_test/multiregion index a9e08985de0d..7e0575cee2dc 100644 --- a/pkg/sql/logictest/testdata/logic_test/multiregion +++ b/pkg/sql/logictest/testdata/logic_test/multiregion @@ -85,6 +85,9 @@ DROP TYPE multi_region_test_db.public.crdb_internal_region statement error "multi_region_test_db.public.crdb_internal_region" is a multi-region 
enum and can't be modified using the alter type command ALTER TYPE multi_region_test_db.public.crdb_internal_region ADD VALUE 'us-east-1' +statement error "multi_region_test_db.public.crdb_internal_region" is a multi-region enum and can't be modified using the alter type command +ALTER TYPE multi_region_test_db.public.crdb_internal_region DROP VALUE 'us-east-1' + statement error region "region_no_exists" does not exist\nHINT:.*valid regions: ap-southeast-2, ca-central-1, us-east-1 CREATE DATABASE invalid_region_db PRIMARY REGION "region_no_exists" REGION "region_no_exists" @@ -443,7 +446,7 @@ ALTER DATABASE alter_test_db ADD REGION "ap-southeast-2" statement error cannot add region ALTER DATABASE new_db ADD REGION "us-west-1" -statement error implementation pending +statement error pq: database has no regions to drop ALTER DATABASE new_db DROP REGION "us-west-1" statement ok @@ -828,3 +831,136 @@ CREATE TABLE public.t ( CONSTRAINT "primary" PRIMARY KEY (rowid ASC), FAMILY "primary" (k, rowid) ) LOCALITY REGIONAL BY TABLE IN PRIMARY REGION + +statement ok +CREATE DATABASE non_multi_region_db + +statement error pq: database has no regions to drop +ALTER DATABASE non_multi_region_db DROP REGION "ca-central-1" + +statement ok +CREATE DATABASE drop_region_db PRIMARY REGION "ca-central-1" REGIONS "ap-southeast-2", "us-east-1" + +statement error pq: cannot drop region "ca-central-1"\nHINT: You must designate another region as the primary region or remove all other regions before attempting to drop region "ca-central-1" +ALTER DATABASE drop_region_db DROP REGION "ca-central-1" + +statement ok +ALTER DATABASE drop_region_db DROP REGION "us-east-1" + +# Ensure that events are generated for dropping the region +query IT +SELECT "reportingID", info::JSONB - 'Timestamp' - 'DescriptorID' +FROM system.eventlog +WHERE "eventType" = 'alter_database_drop_region' +---- +1 {"DatabaseName": "drop_region_db", "EventType": "alter_database_drop_region", "RegionName": "\"us-east-1\"", "Statement": "ALTER DATABASE drop_region_db DROP REGION \"us-east-1\"", "User": "root"} + +query TTBT colnames +SHOW REGIONS FROM DATABASE drop_region_db +---- +database region primary zones +drop_region_db ca-central-1 true {ca-az1,ca-az2,ca-az3} +drop_region_db ap-southeast-2 false {ap-az1,ap-az2,ap-az3} + +query TTTT colnames +SHOW ENUMS FROM drop_region_db +---- +schema name values owner +public crdb_internal_region {ap-southeast-2,ca-central-1} root + +statement error pq: region "us-east-1" has not been added to database "drop_region_db" +CREATE TABLE drop_region_db.public.t(a int) LOCALITY REGIONAL BY TABLE IN "us-east-1" + +statement ok +CREATE TABLE drop_region_db.public.t(a int) LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" + +statement error pq: could not remove enum value "ap-southeast-2" as it is the home region for table "t" +ALTER DATABASE drop_region_db DROP REGION "ap-southeast-2" + +statement ok +DROP TABLE drop_region_db.public.t + +statement ok +ALTER DATABASE drop_region_db DROP REGION "ap-southeast-2" + +# Test a table that is implicitly homed in the primary region because it was +# created before the first region was added to the multi-region DB. 
+statement ok +CREATE DATABASE start_off_non_multi_region; +CREATE TABLE start_off_non_multi_region.public.t(a INT); +ALTER DATABASE start_off_non_multi_region PRIMARY REGION "ca-central-1"; +ALTER DATABASE start_off_non_multi_region ADD REGION "ap-southeast-2" + +statement error pq: cannot drop region "ca-central-1"\nHINT: You must designate another region as the primary region or remove all other regions before attempting to drop region "ca-central-1" +ALTER DATABASE start_off_non_multi_region DROP REGION "ca-central-1" + +statement ok +ALTER DATABASE start_off_non_multi_region PRIMARY REGION "ap-southeast-2" + +# Ensure that the table t does not disallow us from dropping the old primary region, +# even though it was homed there before the primary region was switched. +statement ok +ALTER DATABASE start_off_non_multi_region DROP REGION "ca-central-1" + +# Test drops in a transaction. +statement ok +CREATE DATABASE txn_database_drop_regions PRIMARY REGION "ca-central-1" REGIONS "ap-southeast-2", "us-east-1" + +query TTBT colnames +SHOW REGIONS FROM DATABASE txn_database_drop_regions +---- +database region primary zones +txn_database_drop_regions ca-central-1 true {ca-az1,ca-az2,ca-az3} +txn_database_drop_regions ap-southeast-2 false {ap-az1,ap-az2,ap-az3} +txn_database_drop_regions us-east-1 false {us-az1,us-az2,us-az3} + + +statement ok +BEGIN; +ALTER DATABASE txn_database_drop_regions DROP REGION "us-east-1"; +ALTER DATABASE txn_database_drop_regions DROP REGION "ap-southeast-2"; +COMMIT; + +query TTBT colnames +SHOW REGIONS FROM DATABASE txn_database_drop_regions +---- +database region primary zones +txn_database_drop_regions ca-central-1 true {ca-az1,ca-az2,ca-az3} + +statement ok +CREATE DATABASE drop_regions_alter_patterns PRIMARY REGION "ca-central-1" REGIONS "ap-southeast-2", "us-east-1" + +statement ok +USE drop_regions_alter_patterns + +statement ok +CREATE TABLE east() LOCALITY REGIONAL BY TABLE IN "us-east-1" + +statement ok +CREATE TABLE southeast() LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" + +statement error pq: could not remove enum value "ap-southeast-2" as it is the home region for table "southeast" +ALTER DATABASE drop_regions_alter_patterns DROP REGION "ap-southeast-2" + +statement ok +ALTER TABLE southeast SET LOCALITY REGIONAL BY TABLE IN PRIMARY REGION + +statement ok +ALTER DATABASE drop_regions_alter_patterns DROP REGION "ap-southeast-2" + +statement error pq: could not remove enum value "us-east-1" as it is the home region for table "east" +ALTER DATABASE drop_regions_alter_patterns DROP REGION "us-east-1" + +statement ok +ALTER TABLE east SET LOCALITY GLOBAL + +statement ok +ALTER DATABASE drop_regions_alter_patterns DROP REGION "us-east-1" + +# Drop the tables that held a dependency with the type descriptor before their +# alter statements. This ensures that the type descriptor dependency is +# successfully unlinked, as we expect validation to run after the test on the +# type descriptor. 
+statement ok +DROP TABLE east; +DROP TABLE southeast; diff --git a/pkg/sql/logictest/testdata/logic_test/notice b/pkg/sql/logictest/testdata/logic_test/notice index 51479ff17d95..cffbd1040225 100644 --- a/pkg/sql/logictest/testdata/logic_test/notice +++ b/pkg/sql/logictest/testdata/logic_test/notice @@ -42,7 +42,7 @@ ALTER TYPE color ADD VALUE 'black' query T noticetrace ALTER TYPE color ADD VALUE IF NOT EXISTS 'black' ---- -NOTICE: enum label "black" already exists, skipping +NOTICE: enum value "black" already exists, skipping statement ok CREATE MATERIALIZED VIEW v AS SELECT 1 diff --git a/pkg/sql/multiregion_test.go b/pkg/sql/multiregion_test.go new file mode 100644 index 000000000000..bc4a20aabbee --- /dev/null +++ b/pkg/sql/multiregion_test.go @@ -0,0 +1,129 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package sql_test + +import ( + "context" + "fmt" + "testing" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "github.com/cockroachdb/errors" +) + +func TestSettingPrimaryRegionAmidstDrop(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + numServers := 2 + serverArgs := make(map[int]base.TestServerArgs) + + regionNames := make([]string, numServers) + for i := 0; i < numServers; i++ { + // "us-east1", "us-east2"... + regionNames[i] = fmt.Sprintf("us-east%d", i+1) + } + + var mu syncutil.Mutex + dropRegionStarted := make(chan struct{}) + waitForPrimaryRegionSwitch := make(chan struct{}) + dropRegionFinished := make(chan struct{}) + for i := 0; i < numServers; i++ { + serverArgs[i] = base.TestServerArgs{ + Knobs: base.TestingKnobs{ + SQLTypeSchemaChanger: &sql.TypeSchemaChangerTestingKnobs{ + RunBeforeEnumMemberPromotion: func() { + mu.Lock() + defer mu.Unlock() + if dropRegionStarted != nil { + close(dropRegionStarted) + <-waitForPrimaryRegionSwitch + dropRegionStarted = nil + } + }, + }, + }, + Locality: roachpb.Locality{ + Tiers: []roachpb.Tier{{Key: "region", Value: regionNames[i]}}, + }, + } + } + + tc := serverutils.StartNewTestCluster(t, numServers, base.TestClusterArgs{ + ServerArgsPerNode: serverArgs, + }) + + ctx := context.Background() + defer tc.Stopper().Stop(ctx) + + sqlDB := tc.ServerConn(0) + + // Setup the test. + _, err := sqlDB.Exec(`CREATE DATABASE db WITH PRIMARY REGION "us-east1" REGIONS "us-east2"`) + if err != nil { + t.Fatal(err) + } + + go func() { + if _, err := sqlDB.Exec(`ALTER DATABASE db DROP REGION "us-east2"`); err != nil { + t.Error(err) + } + close(dropRegionFinished) + }() + + // Wait for the drop region to start and move the enum member "us-east2" in + // read-only state. 
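+	// The RunBeforeEnumMemberPromotion knob above closes dropRegionStarted once
+	// "us-east2" has been moved to READ_ONLY but before the drop is finalized,
+	// and then blocks on waitForPrimaryRegionSwitch. That gives this goroutine
+	// a window to attempt the primary region switch while the region is still
+	// mid-drop.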
+ <-dropRegionStarted + + _, err = sqlDB.Exec(`ALTER DATABASE db PRIMARY REGION "us-east2"`) + + if err == nil { + t.Fatalf("expected error, found nil") + } + if !testutils.IsError(err, `"us-east2" has not been added to the database`) { + t.Fatalf(`expected err, got %v`, err) + } + + close(waitForPrimaryRegionSwitch) + <-dropRegionFinished + + // We expect the async region drop job to succeed soon. + testutils.SucceedsSoon(t, func() error { + rows, err := sqlDB.Query("SELECT region FROM [SHOW REGIONS FROM DATABASE db]") + if err != nil { + return err + } + defer rows.Close() + + const expectedRegion = "us-east1" + var region string + rows.Next() + if err := rows.Scan(®ion); err != nil { + return err + } + + if region != expectedRegion { + return errors.Newf("expected region to be: %q, got %q", expectedRegion, region) + } + + if rows.Next() { + return errors.New("unexpected number of rows returned") + } + return nil + }) +} diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go index 4db02e6f7b3e..06add597cbb6 100644 --- a/pkg/sql/schema_changer.go +++ b/pkg/sql/schema_changer.go @@ -1059,13 +1059,23 @@ func (sc *SchemaChanger) done(ctx context.Context) error { return err } - referencedTypeIDs, err = scTable.GetAllReferencedTypeIDs(func(id descpb.ID) (catalog.TypeDescriptor, error) { - desc, err := descsCol.GetImmutableTypeByID(ctx, txn, id, tree.ObjectLookupFlags{}) - if err != nil { - return nil, err - } - return desc, nil - }) + dbDesc, err := descsCol.GetImmutableDatabaseByID( + ctx, + txn, + scTable.GetParentID(), + tree.DatabaseLookupFlags{Required: true}, + ) + if err != nil { + return err + } + referencedTypeIDs, err = scTable.GetAllReferencedTypeIDs(dbDesc, + func(id descpb.ID) (catalog.TypeDescriptor, error) { + desc, err := descsCol.GetImmutableTypeByID(ctx, txn, id, tree.ObjectLookupFlags{}) + if err != nil { + return nil, err + } + return desc, nil + }) if err != nil { return err } @@ -1163,16 +1173,6 @@ func (sc *SchemaChanger) done(ctx context.Context) error { // configurations removes spans for indexes in the dropping state, // which we don't want. So, set up the zone configs before we swap. if lcSwap := pkSwap.LocalityConfigSwap; lcSwap != nil { - dbDesc, err := descsCol.GetImmutableDatabaseByID( - ctx, - txn, - scTable.GetParentID(), - tree.DatabaseLookupFlags{Required: true}, - ) - if err != nil { - return err - } - // We will add up to two options - one for the table itself, and one // for all the new indexes associated with the table. opts := make([]applyZoneConfigForMultiRegionTableOption, 0, 2) @@ -1290,7 +1290,7 @@ func (sc *SchemaChanger) done(ctx context.Context) error { } // For locality swaps, ensure the table descriptor fields are correctly filled. if lcSwap := pkSwap.LocalityConfigSwap; lcSwap != nil { - newConfig := &lcSwap.NewLocalityConfig + localityConfigToSwapTo := lcSwap.NewLocalityConfig if mutation.Direction == descpb.DescriptorMutation_ADD { // Sanity check that locality has not been changed during backfill. if !scTable.LocalityConfig.Equal(lcSwap.OldLocalityConfig) { @@ -1302,10 +1302,13 @@ func (sc *SchemaChanger) done(ctx context.Context) error { } } else { // DROP is hit on cancellation, in which case we must roll back. 
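// Note that the bare assignment to scTable.LocalityConfig is replaced below by
// a call to setNewLocalityConfig (defined outside this hunk); routing the write
// through a helper is what allows the multi-region type descriptor back
// references to be kept in sync with the locality change, rather than only
// mutating the field.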
- newConfig = &lcSwap.OldLocalityConfig + localityConfigToSwapTo = lcSwap.OldLocalityConfig + } + if err := setNewLocalityConfig( + ctx, scTable, txn, b, localityConfigToSwapTo, kvTrace, descsCol); err != nil { + return err } - scTable.LocalityConfig = newConfig - switch newConfig.Locality.(type) { + switch localityConfigToSwapTo.Locality.(type) { case *descpb.TableDescriptor_LocalityConfig_RegionalByTable_, *descpb.TableDescriptor_LocalityConfig_Global_: scTable.PartitionAllBy = false @@ -1314,7 +1317,7 @@ func (sc *SchemaChanger) done(ctx context.Context) error { default: return errors.AssertionFailedf( "unknown locality on PK swap: %T", - newConfig.Locality, + localityConfigToSwapTo, ) } @@ -1408,13 +1411,14 @@ func (sc *SchemaChanger) done(ctx context.Context) error { // type descriptors. If this table has been dropped in the mean time, then // don't install any backreferences. if !scTable.Dropped() { - newReferencedTypeIDs, err := scTable.GetAllReferencedTypeIDs(func(id descpb.ID) (catalog.TypeDescriptor, error) { - typ, err := descsCol.GetMutableTypeVersionByID(ctx, txn, id) - if err != nil { - return nil, err - } - return typ, err - }) + newReferencedTypeIDs, err := scTable.GetAllReferencedTypeIDs(dbDesc, + func(id descpb.ID) (catalog.TypeDescriptor, error) { + typ, err := descsCol.GetMutableTypeVersionByID(ctx, txn, id) + if err != nil { + return nil, err + } + return typ, err + }) if err != nil { return err } diff --git a/pkg/sql/sem/tree/datum.go b/pkg/sql/sem/tree/datum.go index cbd594026d4e..1b48aac16acb 100644 --- a/pkg/sql/sem/tree/datum.go +++ b/pkg/sql/sem/tree/datum.go @@ -4015,7 +4015,7 @@ func MakeDEnumFromLogicalRepresentation(typ *types.T, rep string) (*DEnum, error // representation. This is to ensure that it will not be written until all // nodes in the cluster are able to decode the physical representation. if typ.TypeMeta.EnumData.IsMemberReadOnly[idx] { - return nil, errors.Newf("enum label %q is not yet public", rep) + return nil, errors.Newf("enum value %q is not yet public", rep) } return &DEnum{ EnumTyp: typ, diff --git a/pkg/sql/type_change.go b/pkg/sql/type_change.go index fc4d7a5e210b..2f9c8490a5ec 100644 --- a/pkg/sql/type_change.go +++ b/pkg/sql/type_change.go @@ -227,7 +227,7 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { fn() } - // First, we check if any of the enum labels that are being removed are in + // First, we check if any of the enum values that are being removed are in // use and fail. This is done in a separate txn to the one that mutates the // descriptor, as this validation can take arbitrarily long. validateDrops := func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { @@ -237,7 +237,7 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { } for _, member := range typeDesc.EnumMembers { if t.isTransitioningInCurrentJob(&member) && enumMemberIsRemoving(&member) { - if err := t.canRemoveEnumLabel(ctx, typeDesc, txn, &member, descsCol); err != nil { + if err := t.canRemoveEnumValue(ctx, typeDesc, txn, &member, descsCol); err != nil { return err } } @@ -251,7 +251,7 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { return err } - // Now that we've ascertained that the enum label can be removed, we can + // Now that we've ascertained that the enum values can be removed, we can // actually go about modifying the type descriptor. 
// The version of the array type needs to get bumped as well so that @@ -270,17 +270,9 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { } } // Next, deal with all the members that need to be removed from the slice. - idx := 0 - for _, member := range typeDesc.EnumMembers { - if t.isTransitioningInCurrentJob(&member) && enumMemberIsRemoving(&member) { - // Truncation logic below will remove all members that need to be - // removed. - continue - } - typeDesc.EnumMembers[idx] = member - idx++ - } - typeDesc.EnumMembers = typeDesc.EnumMembers[:idx] + applyFilterOnEnumMembers(typeDesc, func(member *descpb.TypeDescriptor_EnumMember) bool { + return t.isTransitioningInCurrentJob(member) && enumMemberIsRemoving(member) + }) b := txn.NewBatch() if err := descsCol.WriteDescToBatch( @@ -332,19 +324,38 @@ func (t *typeSchemaChanger) isTransitioningInCurrentJob( return false } -// cleanupEnumLabels performs cleanup if any of the enum label transitions +// applyFilterOnEnumMembers modifies the supplied typeDesc by removing all enum +// members as dictated by shouldRemove. +func applyFilterOnEnumMembers( + typeDesc *typedesc.Mutable, shouldRemove func(member *descpb.TypeDescriptor_EnumMember) bool, +) { + idx := 0 + for _, member := range typeDesc.EnumMembers { + if shouldRemove(&member) { + // By not updating the index, the truncation logic below will remove + // this label from the list of members. + continue + } + typeDesc.EnumMembers[idx] = member + idx++ + } + typeDesc.EnumMembers = typeDesc.EnumMembers[:idx] +} + +// cleanupEnumValues performs cleanup if any of the enum value transitions // fails. In particular: -// 1. If an enum label was being added as part of this txn, we remove it +// 1. If an enum value was being added as part of this txn, we remove it // from the descriptor. -// 2. If an enum label was being removed as part of this txn, we promote +// 2. If an enum value was being removed as part of this txn, we promote // it back to writable. -func (t *typeSchemaChanger) cleanupEnumLabels(ctx context.Context) error { +func (t *typeSchemaChanger) cleanupEnumValues(ctx context.Context) error { // Cleanup: cleanup := func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { typeDesc, err := descsCol.GetMutableTypeVersionByID(ctx, txn, t.typeID) if err != nil { return err } + b := txn.NewBatch() // No cleanup required. if !enumHasNonPublic(&typeDesc.Immutable) { return nil @@ -356,23 +367,33 @@ func (t *typeSchemaChanger) cleanupEnumLabels(ctx context.Context) error { if t.isTransitioningInCurrentJob(member) && enumMemberIsRemoving(member) { member.Capability = descpb.TypeDescriptor_EnumMember_ALL member.Direction = descpb.TypeDescriptor_EnumMember_NONE + + if typeDesc.Kind == descpb.TypeDescriptor_MULTIREGION_ENUM { + dbDesc, err := descsCol.GetMutableDatabaseByID( + ctx, txn, typeDesc.ParentID, tree.DatabaseLookupFlags{}) + if err != nil { + return err + } + err = addRegionToRegionConfig(dbDesc, descpb.RegionName(member.LogicalRepresentation)) + if err != nil { + return err + } + if err := dbDesc.Validate(); err != nil { + return errors.Wrapf(err, "could not re-add region to the database descriptor") + } + + if err := descsCol.WriteDescToBatch(ctx, true /* kvTrace */, dbDesc, b); err != nil { + return err + } + } } } // Now deal with all members that we initially hoped to add but now need // to be removed from the descriptor. 
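		// Cleanup reuses applyFilterOnEnumMembers (introduced above) with the
		// opposite predicate from exec: here members that were in the process of
		// being added are filtered out, whereas exec filters out members that
		// are being removed.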
- idx := 0 - for _, member := range typeDesc.EnumMembers { - if t.isTransitioningInCurrentJob(&member) && enumMemberIsAdding(&member) { - // By not updating the index, the truncation logic below will remove - // this label from the list of members. - continue - } - typeDesc.EnumMembers[idx] = member - idx++ - } - typeDesc.EnumMembers = typeDesc.EnumMembers[:idx] + applyFilterOnEnumMembers(typeDesc, func(member *descpb.TypeDescriptor_EnumMember) bool { + return t.isTransitioningInCurrentJob(member) && enumMemberIsAdding(member) + }) - b := txn.NewBatch() if err := descsCol.WriteDescToBatch( ctx, true /* kvTrace */, typeDesc, b, ); err != nil { @@ -395,9 +416,9 @@ func (t *typeSchemaChanger) cleanupEnumLabels(ctx context.Context) error { return nil } -// canRemoveEnumLabel returns an error if the enum label is in use and therefore +// canRemoveEnumValue returns an error if the enum value is in use and therefore // can't be removed. -func (t *typeSchemaChanger) canRemoveEnumLabel( +func (t *typeSchemaChanger) canRemoveEnumValue( ctx context.Context, typeDesc *typedesc.Mutable, txn *kv.Txn, @@ -428,6 +449,7 @@ func (t *typeSchemaChanger) canRemoveEnumLabel( columns := tree.AsStringWithFlags(&colSelectors, tree.FmtSerializable) query.WriteString(fmt.Sprintf("SELECT %s FROM [%d as t] WHERE", columns, ID)) firstClause := true + validationQueryConstructed := false for _, col := range desc.PublicColumns() { if typeDesc.ID == typedesc.GetTypeDescID(col.GetType()) { if !firstClause { @@ -436,36 +458,56 @@ func (t *typeSchemaChanger) canRemoveEnumLabel( query.WriteString(fmt.Sprintf(" t.%s = %s", col.GetName(), convertToSQLStringRepresentation(member.PhysicalRepresentation))) firstClause = false + validationQueryConstructed = true } } query.WriteString(" LIMIT 1") - // We need to override the internal executors current database (which would - // be unset by default) when executing the query constructed above. This is - // because the enum label may be used in a view expression, which is - // name resolved in the context of the type's database. - dbDesc, err := descsCol.GetImmutableDatabaseByID( - ctx, txn, typeDesc.ParentID, tree.DatabaseLookupFlags{}) - if err != nil { - return errors.Wrapf(err, - "could not validate enum value removal for %q", member.LogicalRepresentation) - } - override := sessiondata.InternalExecutorOverride{ - User: security.RootUserName(), - Database: dbDesc.Name, - } - rows, err := t.execCfg.InternalExecutor.QueryRowEx( - ctx, "count-value-usage", txn, override, query.String()) - if err != nil { - return errors.Wrapf(err, - "could not validate enum value removal for %q", member.LogicalRepresentation) + // NB: A type descriptor reference does not imply at-least one column in the + // table is of the type whose value is being removed. The notable exception + // being REGIONAL BY TABLE multi-region tables. In this case, no valid query + // is constructed and there's nothing to execute. Instead, their validation + // is handled as a special case below. + if validationQueryConstructed { + // We need to override the internal executor's current database (which would + // be unset by default) when executing the query constructed above. This is + // because the enum value may be used in a view expression, which is + // name resolved in the context of the type's database. 
+ dbDesc, err := descsCol.GetImmutableDatabaseByID( + ctx, txn, typeDesc.ParentID, tree.DatabaseLookupFlags{}) + const validationErr = "could not validate removal of enum value %q" + if err != nil { + return errors.Wrapf(err, validationErr, member.LogicalRepresentation) + } + override := sessiondata.InternalExecutorOverride{ + User: security.RootUserName(), + Database: dbDesc.Name, + } + rows, err := t.execCfg.InternalExecutor.QueryRowEx(ctx, "count-value-usage", txn, override, query.String()) + if err != nil { + return errors.Wrapf(err, validationErr, member.LogicalRepresentation) + } + // Check if the above query returned a result. If it did, then the + // enum value is being used by some place. + if len(rows) > 0 { + return pgerror.Newf(pgcode.DependentObjectsStillExist, + "could not remove enum value %q as it is being used by %q in row: %s", + member.LogicalRepresentation, desc.GetName(), labeledRowValues(desc.PublicColumns(), rows)) + } } - // Check if the above query returned a result. If it did, then the - // enum value is being used by some place. - if len(rows) > 0 { - return pgerror.Newf(pgcode.DependentObjectsStillExist, - "could not remove enum value %q as it is being used by %q in row: %s", - member.LogicalRepresentation, desc.GetName(), labeledRowValues(desc.PublicColumns(), rows)) + + // If the type descriptor is a multi-region enum and the table descriptor + // belongs to a regional (by table) table, we disallow dropping the region + // if it is being used as the homed region for that table. + if typeDesc.Kind == descpb.TypeDescriptor_MULTIREGION_ENUM && desc.IsLocalityRegionalByTable() { + homedRegion, err := desc.GetRegionalByTableRegion() + if err != nil { + return err + } + if descpb.RegionName(member.LogicalRepresentation) == homedRegion { + return errors.Newf("could not remove enum value %q as it is the home region for table %q", + member.LogicalRepresentation, desc.GetName()) + } } } // We have ascertained that the value is not in use, and can therefore be @@ -570,7 +612,7 @@ func (t *typeChangeResumer) OnFailOrCancel(ctx context.Context, execCtx interfac execCfg: execCtx.(JobExecContext).ExecCfg(), } - if err := tc.cleanupEnumLabels(ctx); err != nil { + if err := tc.cleanupEnumValues(ctx); err != nil { return err } diff --git a/pkg/sql/walk.go b/pkg/sql/walk.go index de6d110e03ef..ca6ebf664a63 100644 --- a/pkg/sql/walk.go +++ b/pkg/sql/walk.go @@ -337,6 +337,7 @@ var planNodeNames = map[reflect.Type]string{ reflect.TypeOf(&alterDatabaseAddRegionNode{}): "alter database add region", reflect.TypeOf(&alterDatabasePrimaryRegionNode{}): "alter database primary region", reflect.TypeOf(&alterDatabaseSurvivalGoalNode{}): "alter database survive", + reflect.TypeOf(&alterDatabaseDropRegionNode{}): "alter database drop region", reflect.TypeOf(&alterIndexNode{}): "alter index", reflect.TypeOf(&alterSequenceNode{}): "alter sequence", reflect.TypeOf(&alterSchemaNode{}): "alter schema", diff --git a/pkg/util/log/eventpb/ddl_events.pb.go b/pkg/util/log/eventpb/ddl_events.pb.go index fdca8ca376cf..82d7f73dcd1b 100644 --- a/pkg/util/log/eventpb/ddl_events.pb.go +++ b/pkg/util/log/eventpb/ddl_events.pb.go @@ -32,7 +32,7 @@ func (m *CreateDatabase) Reset() { *m = CreateDatabase{} } func (m *CreateDatabase) String() string { return proto.CompactTextString(m) } func (*CreateDatabase) ProtoMessage() {} func (*CreateDatabase) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{0} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{0} } func (m 
*CreateDatabase) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -71,7 +71,7 @@ func (m *DropDatabase) Reset() { *m = DropDatabase{} } func (m *DropDatabase) String() string { return proto.CompactTextString(m) } func (*DropDatabase) ProtoMessage() {} func (*DropDatabase) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{1} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{1} } func (m *DropDatabase) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -110,7 +110,7 @@ func (m *AlterDatabaseAddRegion) Reset() { *m = AlterDatabaseAddRegion{} func (m *AlterDatabaseAddRegion) String() string { return proto.CompactTextString(m) } func (*AlterDatabaseAddRegion) ProtoMessage() {} func (*AlterDatabaseAddRegion) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{2} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{2} } func (m *AlterDatabaseAddRegion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,6 +135,45 @@ func (m *AlterDatabaseAddRegion) XXX_DiscardUnknown() { var xxx_messageInfo_AlterDatabaseAddRegion proto.InternalMessageInfo +// AlterDatabaseAddRegion is recorded when a region is added to a database. +type AlterDatabaseDropRegion struct { + CommonEventDetails `protobuf:"bytes,1,opt,name=common,proto3,embedded=common" json:""` + CommonSQLEventDetails `protobuf:"bytes,2,opt,name=sql,proto3,embedded=sql" json:""` + // The name of the database. + DatabaseName string `protobuf:"bytes,3,opt,name=database_name,json=databaseName,proto3" json:",omitempty"` + // The region being dropped. + RegionName string `protobuf:"bytes,4,opt,name=region_name,json=regionName,proto3" json:",omitempty"` +} + +func (m *AlterDatabaseDropRegion) Reset() { *m = AlterDatabaseDropRegion{} } +func (m *AlterDatabaseDropRegion) String() string { return proto.CompactTextString(m) } +func (*AlterDatabaseDropRegion) ProtoMessage() {} +func (*AlterDatabaseDropRegion) Descriptor() ([]byte, []int) { + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{3} +} +func (m *AlterDatabaseDropRegion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AlterDatabaseDropRegion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (dst *AlterDatabaseDropRegion) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlterDatabaseDropRegion.Merge(dst, src) +} +func (m *AlterDatabaseDropRegion) XXX_Size() int { + return m.Size() +} +func (m *AlterDatabaseDropRegion) XXX_DiscardUnknown() { + xxx_messageInfo_AlterDatabaseDropRegion.DiscardUnknown(m) +} + +var xxx_messageInfo_AlterDatabaseDropRegion proto.InternalMessageInfo + // AlterDatabasePrimaryRegion is recorded when a primary region is added/modified. 
type AlterDatabasePrimaryRegion struct { CommonEventDetails `protobuf:"bytes,1,opt,name=common,proto3,embedded=common" json:""` @@ -149,7 +188,7 @@ func (m *AlterDatabasePrimaryRegion) Reset() { *m = AlterDatabasePrimary func (m *AlterDatabasePrimaryRegion) String() string { return proto.CompactTextString(m) } func (*AlterDatabasePrimaryRegion) ProtoMessage() {} func (*AlterDatabasePrimaryRegion) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{3} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{4} } func (m *AlterDatabasePrimaryRegion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -188,7 +227,7 @@ func (m *AlterDatabaseSurvivalGoal) Reset() { *m = AlterDatabaseSurvival func (m *AlterDatabaseSurvivalGoal) String() string { return proto.CompactTextString(m) } func (*AlterDatabaseSurvivalGoal) ProtoMessage() {} func (*AlterDatabaseSurvivalGoal) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{4} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{5} } func (m *AlterDatabaseSurvivalGoal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -227,7 +266,7 @@ func (m *RenameDatabase) Reset() { *m = RenameDatabase{} } func (m *RenameDatabase) String() string { return proto.CompactTextString(m) } func (*RenameDatabase) ProtoMessage() {} func (*RenameDatabase) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{5} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{6} } func (m *RenameDatabase) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -266,7 +305,7 @@ func (m *ConvertToSchema) Reset() { *m = ConvertToSchema{} } func (m *ConvertToSchema) String() string { return proto.CompactTextString(m) } func (*ConvertToSchema) ProtoMessage() {} func (*ConvertToSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{6} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{7} } func (m *ConvertToSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -305,7 +344,7 @@ func (m *CreateSchema) Reset() { *m = CreateSchema{} } func (m *CreateSchema) String() string { return proto.CompactTextString(m) } func (*CreateSchema) ProtoMessage() {} func (*CreateSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{7} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{8} } func (m *CreateSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -342,7 +381,7 @@ func (m *DropSchema) Reset() { *m = DropSchema{} } func (m *DropSchema) String() string { return proto.CompactTextString(m) } func (*DropSchema) ProtoMessage() {} func (*DropSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{8} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{9} } func (m *DropSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -381,7 +420,7 @@ func (m *RenameSchema) Reset() { *m = RenameSchema{} } func (m *RenameSchema) String() string { return proto.CompactTextString(m) } func (*RenameSchema) ProtoMessage() {} func (*RenameSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{9} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{10} } func (m *RenameSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -422,7 +461,7 @@ func (m *SetSchema) Reset() { *m = SetSchema{} } func (m *SetSchema) String() string { return proto.CompactTextString(m) 
} func (*SetSchema) ProtoMessage() {} func (*SetSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{10} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{11} } func (m *SetSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -461,7 +500,7 @@ func (m *CreateTable) Reset() { *m = CreateTable{} } func (m *CreateTable) String() string { return proto.CompactTextString(m) } func (*CreateTable) ProtoMessage() {} func (*CreateTable) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{11} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{12} } func (m *CreateTable) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +539,7 @@ func (m *DropTable) Reset() { *m = DropTable{} } func (m *DropTable) String() string { return proto.CompactTextString(m) } func (*DropTable) ProtoMessage() {} func (*DropTable) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{12} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{13} } func (m *DropTable) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -539,7 +578,7 @@ func (m *RenameTable) Reset() { *m = RenameTable{} } func (m *RenameTable) String() string { return proto.CompactTextString(m) } func (*RenameTable) ProtoMessage() {} func (*RenameTable) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{13} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{14} } func (m *RenameTable) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -576,7 +615,7 @@ func (m *TruncateTable) Reset() { *m = TruncateTable{} } func (m *TruncateTable) String() string { return proto.CompactTextString(m) } func (*TruncateTable) ProtoMessage() {} func (*TruncateTable) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{14} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{15} } func (m *TruncateTable) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -617,7 +656,7 @@ func (m *AlterTable) Reset() { *m = AlterTable{} } func (m *AlterTable) String() string { return proto.CompactTextString(m) } func (*AlterTable) ProtoMessage() {} func (*AlterTable) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{15} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{16} } func (m *AlterTable) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -660,7 +699,7 @@ func (m *CommentOnColumn) Reset() { *m = CommentOnColumn{} } func (m *CommentOnColumn) String() string { return proto.CompactTextString(m) } func (*CommentOnColumn) ProtoMessage() {} func (*CommentOnColumn) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{16} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{17} } func (m *CommentOnColumn) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -701,7 +740,7 @@ func (m *CommentOnDatabase) Reset() { *m = CommentOnDatabase{} } func (m *CommentOnDatabase) String() string { return proto.CompactTextString(m) } func (*CommentOnDatabase) ProtoMessage() {} func (*CommentOnDatabase) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{17} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{18} } func (m *CommentOnDatabase) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -742,7 +781,7 @@ func (m *CommentOnTable) Reset() { *m = CommentOnTable{} } func (m *CommentOnTable) String() 
string { return proto.CompactTextString(m) } func (*CommentOnTable) ProtoMessage() {} func (*CommentOnTable) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{18} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{19} } func (m *CommentOnTable) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -785,7 +824,7 @@ func (m *CommentOnIndex) Reset() { *m = CommentOnIndex{} } func (m *CommentOnIndex) String() string { return proto.CompactTextString(m) } func (*CommentOnIndex) ProtoMessage() {} func (*CommentOnIndex) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{19} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{20} } func (m *CommentOnIndex) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -826,7 +865,7 @@ func (m *CreateIndex) Reset() { *m = CreateIndex{} } func (m *CreateIndex) String() string { return proto.CompactTextString(m) } func (*CreateIndex) ProtoMessage() {} func (*CreateIndex) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{20} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{21} } func (m *CreateIndex) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -869,7 +908,7 @@ func (m *DropIndex) Reset() { *m = DropIndex{} } func (m *DropIndex) String() string { return proto.CompactTextString(m) } func (*DropIndex) ProtoMessage() {} func (*DropIndex) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{21} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{22} } func (m *DropIndex) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -910,7 +949,7 @@ func (m *AlterIndex) Reset() { *m = AlterIndex{} } func (m *AlterIndex) String() string { return proto.CompactTextString(m) } func (*AlterIndex) ProtoMessage() {} func (*AlterIndex) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{22} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{23} } func (m *AlterIndex) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -951,7 +990,7 @@ func (m *CreateView) Reset() { *m = CreateView{} } func (m *CreateView) String() string { return proto.CompactTextString(m) } func (*CreateView) ProtoMessage() {} func (*CreateView) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{23} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{24} } func (m *CreateView) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -990,7 +1029,7 @@ func (m *DropView) Reset() { *m = DropView{} } func (m *DropView) String() string { return proto.CompactTextString(m) } func (*DropView) ProtoMessage() {} func (*DropView) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{24} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{25} } func (m *DropView) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1029,7 +1068,7 @@ func (m *CreateSequence) Reset() { *m = CreateSequence{} } func (m *CreateSequence) String() string { return proto.CompactTextString(m) } func (*CreateSequence) ProtoMessage() {} func (*CreateSequence) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{25} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{26} } func (m *CreateSequence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1066,7 +1105,7 @@ func (m *DropSequence) Reset() { *m = DropSequence{} } func (m *DropSequence) String() string 
{ return proto.CompactTextString(m) } func (*DropSequence) ProtoMessage() {} func (*DropSequence) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{26} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{27} } func (m *DropSequence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1103,7 +1142,7 @@ func (m *AlterSequence) Reset() { *m = AlterSequence{} } func (m *AlterSequence) String() string { return proto.CompactTextString(m) } func (*AlterSequence) ProtoMessage() {} func (*AlterSequence) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{27} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{28} } func (m *AlterSequence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1151,7 +1190,7 @@ func (m *CommonSchemaChangeEventDetails) Reset() { *m = CommonSchemaChan func (m *CommonSchemaChangeEventDetails) String() string { return proto.CompactTextString(m) } func (*CommonSchemaChangeEventDetails) ProtoMessage() {} func (*CommonSchemaChangeEventDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{28} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{29} } func (m *CommonSchemaChangeEventDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1192,7 +1231,7 @@ func (m *ReverseSchemaChange) Reset() { *m = ReverseSchemaChange{} } func (m *ReverseSchemaChange) String() string { return proto.CompactTextString(m) } func (*ReverseSchemaChange) ProtoMessage() {} func (*ReverseSchemaChange) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{29} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{30} } func (m *ReverseSchemaChange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1267,7 @@ func (m *FinishSchemaChange) Reset() { *m = FinishSchemaChange{} } func (m *FinishSchemaChange) String() string { return proto.CompactTextString(m) } func (*FinishSchemaChange) ProtoMessage() {} func (*FinishSchemaChange) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{30} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{31} } func (m *FinishSchemaChange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1264,7 +1303,7 @@ func (m *FinishSchemaChangeRollback) Reset() { *m = FinishSchemaChangeRo func (m *FinishSchemaChangeRollback) String() string { return proto.CompactTextString(m) } func (*FinishSchemaChangeRollback) ProtoMessage() {} func (*FinishSchemaChangeRollback) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{31} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{32} } func (m *FinishSchemaChangeRollback) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1303,7 +1342,7 @@ func (m *CreateType) Reset() { *m = CreateType{} } func (m *CreateType) String() string { return proto.CompactTextString(m) } func (*CreateType) ProtoMessage() {} func (*CreateType) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{32} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{33} } func (m *CreateType) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1379,7 @@ func (m *DropType) Reset() { *m = DropType{} } func (m *DropType) String() string { return proto.CompactTextString(m) } func (*DropType) ProtoMessage() {} func (*DropType) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, 
[]int{33} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{34} } func (m *DropType) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1377,7 +1416,7 @@ func (m *AlterType) Reset() { *m = AlterType{} } func (m *AlterType) String() string { return proto.CompactTextString(m) } func (*AlterType) ProtoMessage() {} func (*AlterType) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{34} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{35} } func (m *AlterType) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1416,7 +1455,7 @@ func (m *RenameType) Reset() { *m = RenameType{} } func (m *RenameType) String() string { return proto.CompactTextString(m) } func (*RenameType) ProtoMessage() {} func (*RenameType) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{35} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{36} } func (m *RenameType) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1457,7 +1496,7 @@ func (m *CreateStatistics) Reset() { *m = CreateStatistics{} } func (m *CreateStatistics) String() string { return proto.CompactTextString(m) } func (*CreateStatistics) ProtoMessage() {} func (*CreateStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{36} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{37} } func (m *CreateStatistics) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1497,7 +1536,7 @@ func (m *UnsafeUpsertDescriptor) Reset() { *m = UnsafeUpsertDescriptor{} func (m *UnsafeUpsertDescriptor) String() string { return proto.CompactTextString(m) } func (*UnsafeUpsertDescriptor) ProtoMessage() {} func (*UnsafeUpsertDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{37} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{38} } func (m *UnsafeUpsertDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1541,7 +1580,7 @@ func (m *UnsafeDeleteDescriptor) Reset() { *m = UnsafeDeleteDescriptor{} func (m *UnsafeDeleteDescriptor) String() string { return proto.CompactTextString(m) } func (*UnsafeDeleteDescriptor) ProtoMessage() {} func (*UnsafeDeleteDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{38} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{39} } func (m *UnsafeDeleteDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1587,7 +1626,7 @@ func (m *UnsafeUpsertNamespaceEntry) Reset() { *m = UnsafeUpsertNamespac func (m *UnsafeUpsertNamespaceEntry) String() string { return proto.CompactTextString(m) } func (*UnsafeUpsertNamespaceEntry) ProtoMessage() {} func (*UnsafeUpsertNamespaceEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{39} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{40} } func (m *UnsafeUpsertNamespaceEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1631,7 +1670,7 @@ func (m *UnsafeDeleteNamespaceEntry) Reset() { *m = UnsafeDeleteNamespac func (m *UnsafeDeleteNamespaceEntry) String() string { return proto.CompactTextString(m) } func (*UnsafeDeleteNamespaceEntry) ProtoMessage() {} func (*UnsafeDeleteNamespaceEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_ddl_events_c32332d2c9b04c72, []int{40} + return fileDescriptor_ddl_events_329fe5af9ff3ec61, []int{41} } func (m *UnsafeDeleteNamespaceEntry) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -1660,6 +1699,7 @@ func init() { proto.RegisterType((*CreateDatabase)(nil), "cockroach.util.log.eventpb.CreateDatabase") proto.RegisterType((*DropDatabase)(nil), "cockroach.util.log.eventpb.DropDatabase") proto.RegisterType((*AlterDatabaseAddRegion)(nil), "cockroach.util.log.eventpb.AlterDatabaseAddRegion") + proto.RegisterType((*AlterDatabaseDropRegion)(nil), "cockroach.util.log.eventpb.AlterDatabaseDropRegion") proto.RegisterType((*AlterDatabasePrimaryRegion)(nil), "cockroach.util.log.eventpb.AlterDatabasePrimaryRegion") proto.RegisterType((*AlterDatabaseSurvivalGoal)(nil), "cockroach.util.log.eventpb.AlterDatabaseSurvivalGoal") proto.RegisterType((*RenameDatabase)(nil), "cockroach.util.log.eventpb.RenameDatabase") @@ -1840,7 +1880,7 @@ func (m *AlterDatabaseAddRegion) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *AlterDatabasePrimaryRegion) Marshal() (dAtA []byte, err error) { +func (m *AlterDatabaseDropRegion) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -1850,7 +1890,7 @@ func (m *AlterDatabasePrimaryRegion) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AlterDatabasePrimaryRegion) MarshalTo(dAtA []byte) (int, error) { +func (m *AlterDatabaseDropRegion) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -1877,6 +1917,52 @@ func (m *AlterDatabasePrimaryRegion) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintDdlEvents(dAtA, i, uint64(len(m.DatabaseName))) i += copy(dAtA[i:], m.DatabaseName) } + if len(m.RegionName) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintDdlEvents(dAtA, i, uint64(len(m.RegionName))) + i += copy(dAtA[i:], m.RegionName) + } + return i, nil +} + +func (m *AlterDatabasePrimaryRegion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlterDatabasePrimaryRegion) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) + n9, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + dAtA[i] = 0x12 + i++ + i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) + n10, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + if len(m.DatabaseName) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintDdlEvents(dAtA, i, uint64(len(m.DatabaseName))) + i += copy(dAtA[i:], m.DatabaseName) + } if len(m.PrimaryRegionName) > 0 { dAtA[i] = 0x22 i++ @@ -1904,19 +1990,19 @@ func (m *AlterDatabaseSurvivalGoal) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n9, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n11, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n11 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n10, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n12, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n12 if len(m.DatabaseName) > 0 { dAtA[i] = 0x1a i++ @@ -1950,19 +2036,19 @@ func (m *RenameDatabase) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n11, err := 
m.CommonEventDetails.MarshalTo(dAtA[i:]) + n13, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n13 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n12, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n14, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n14 if len(m.DatabaseName) > 0 { dAtA[i] = 0x1a i++ @@ -1996,19 +2082,19 @@ func (m *ConvertToSchema) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n13, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n15, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n15 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n14, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n16, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n16 if len(m.DatabaseName) > 0 { dAtA[i] = 0x1a i++ @@ -2042,19 +2128,19 @@ func (m *CreateSchema) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n15, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n17, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n17 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n16, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n18, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n18 if len(m.SchemaName) > 0 { dAtA[i] = 0x1a i++ @@ -2088,19 +2174,19 @@ func (m *DropSchema) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n17, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n19, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n19 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n18, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n20, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n20 if len(m.SchemaName) > 0 { dAtA[i] = 0x1a i++ @@ -2128,19 +2214,19 @@ func (m *RenameSchema) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n19, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n21, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n21 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n20, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n22, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n22 if len(m.SchemaName) > 0 { dAtA[i] = 0x1a i++ @@ -2174,19 +2260,19 @@ func (m *SetSchema) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n21, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n23, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n23 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n22, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + 
n24, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n24 if len(m.DescriptorName) > 0 { dAtA[i] = 0x1a i++ @@ -2226,19 +2312,19 @@ func (m *CreateTable) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n23, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n25, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n25 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n24, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n26, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n26 if len(m.TableName) > 0 { dAtA[i] = 0x1a i++ @@ -2272,19 +2358,19 @@ func (m *DropTable) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n25, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n27, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n27 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n26, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n28, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n28 if len(m.TableName) > 0 { dAtA[i] = 0x1a i++ @@ -2327,19 +2413,19 @@ func (m *RenameTable) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n27, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n29, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n29 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n28, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n30, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n30 if len(m.TableName) > 0 { dAtA[i] = 0x1a i++ @@ -2373,19 +2459,19 @@ func (m *TruncateTable) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n29, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n31, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n31 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n30, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n32, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n32 if len(m.TableName) > 0 { dAtA[i] = 0x1a i++ @@ -2413,19 +2499,19 @@ func (m *AlterTable) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n31, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n33, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n33 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n32, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n34, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n34 if len(m.TableName) > 0 { dAtA[i] = 0x1a i++ @@ -2473,19 +2559,19 @@ func (m *CommentOnColumn) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, 
uint64(m.CommonEventDetails.Size())) - n33, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n35, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n35 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n34, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n36, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n36 if len(m.TableName) > 0 { dAtA[i] = 0x1a i++ @@ -2535,19 +2621,19 @@ func (m *CommentOnDatabase) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n35, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n37, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n37 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n36, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n38, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n38 if len(m.DatabaseName) > 0 { dAtA[i] = 0x1a i++ @@ -2591,19 +2677,19 @@ func (m *CommentOnTable) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n37, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n39, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n39 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n38, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n40, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n40 if len(m.TableName) > 0 { dAtA[i] = 0x1a i++ @@ -2647,19 +2733,19 @@ func (m *CommentOnIndex) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n39, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n41, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n41 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n40, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n42, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n42 if len(m.TableName) > 0 { dAtA[i] = 0x1a i++ @@ -2709,19 +2795,19 @@ func (m *CreateIndex) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n41, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n43, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n43 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n42, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n44, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n44 if len(m.TableName) > 0 { dAtA[i] = 0x1a i++ @@ -2760,19 +2846,19 @@ func (m *DropIndex) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n43, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n45, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n45 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n44, err 
:= m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n46, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n44 + i += n46 if len(m.TableName) > 0 { dAtA[i] = 0x1a i++ @@ -2826,19 +2912,19 @@ func (m *AlterIndex) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n45, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n47, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n45 + i += n47 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n46, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n48, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n46 + i += n48 if len(m.TableName) > 0 { dAtA[i] = 0x1a i++ @@ -2877,19 +2963,19 @@ func (m *CreateView) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n47, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n49, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n47 + i += n49 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n48, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n50, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 + i += n50 if len(m.ViewName) > 0 { dAtA[i] = 0x1a i++ @@ -2929,19 +3015,19 @@ func (m *DropView) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n49, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n51, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n51 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n50, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n52, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n52 if len(m.ViewName) > 0 { dAtA[i] = 0x1a i++ @@ -2984,19 +3070,19 @@ func (m *CreateSequence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n51, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n53, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n53 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n52, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n54, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n54 if len(m.SequenceName) > 0 { dAtA[i] = 0x1a i++ @@ -3030,19 +3116,19 @@ func (m *DropSequence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n53, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n55, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n55 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n54, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n56, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n54 + i += n56 if len(m.SequenceName) > 0 { dAtA[i] = 0x1a i++ @@ -3070,19 +3156,19 @@ func (m *AlterSequence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = 
encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n55, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n57, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n55 + i += n57 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n56, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n58, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n56 + i += n58 if len(m.SequenceName) > 0 { dAtA[i] = 0x1a i++ @@ -3143,19 +3229,19 @@ func (m *ReverseSchemaChange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n57, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n59, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n57 + i += n59 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSchemaChangeEventDetails.Size())) - n58, err := m.CommonSchemaChangeEventDetails.MarshalTo(dAtA[i:]) + n60, err := m.CommonSchemaChangeEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n58 + i += n60 if len(m.Error) > 0 { dAtA[i] = 0x22 i++ @@ -3189,19 +3275,19 @@ func (m *FinishSchemaChange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n59, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n61, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n59 + i += n61 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSchemaChangeEventDetails.Size())) - n60, err := m.CommonSchemaChangeEventDetails.MarshalTo(dAtA[i:]) + n62, err := m.CommonSchemaChangeEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n60 + i += n62 return i, nil } @@ -3223,19 +3309,19 @@ func (m *FinishSchemaChangeRollback) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n61, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n63, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n61 + i += n63 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSchemaChangeEventDetails.Size())) - n62, err := m.CommonSchemaChangeEventDetails.MarshalTo(dAtA[i:]) + n64, err := m.CommonSchemaChangeEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n62 + i += n64 return i, nil } @@ -3257,19 +3343,19 @@ func (m *CreateType) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n63, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n65, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n63 + i += n65 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n64, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n66, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n64 + i += n66 if len(m.TypeName) > 0 { dAtA[i] = 0x22 i++ @@ -3303,19 +3389,19 @@ func (m *DropType) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n65, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n67, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n65 + i += n67 dAtA[i] = 0x12 i++ i = 
encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n66, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n68, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n66 + i += n68 if len(m.TypeName) > 0 { dAtA[i] = 0x1a i++ @@ -3343,19 +3429,19 @@ func (m *AlterType) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n67, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n69, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n67 + i += n69 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n68, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n70, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n68 + i += n70 if len(m.TypeName) > 0 { dAtA[i] = 0x1a i++ @@ -3383,19 +3469,19 @@ func (m *RenameType) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n69, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n71, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n69 + i += n71 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n70, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n72, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n70 + i += n72 if len(m.TypeName) > 0 { dAtA[i] = 0x1a i++ @@ -3429,19 +3515,19 @@ func (m *CreateStatistics) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n71, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n73, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n71 + i += n73 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n72, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n74, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n72 + i += n74 if len(m.TableName) > 0 { dAtA[i] = 0x1a i++ @@ -3469,19 +3555,19 @@ func (m *UnsafeUpsertDescriptor) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n73, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n75, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n73 + i += n75 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n74, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n76, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n74 + i += n76 if len(m.PreviousDescriptor) > 0 { dAtA[i] = 0x1a i++ @@ -3531,19 +3617,19 @@ func (m *UnsafeDeleteDescriptor) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n75, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n77, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n75 + i += n77 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n76, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n78, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n76 + i += n78 if m.ParentID != 0 { dAtA[i] = 0x18 i++ @@ 
-3597,19 +3683,19 @@ func (m *UnsafeUpsertNamespaceEntry) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n77, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n79, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n77 + i += n79 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n78, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n80, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n78 + i += n80 if m.ParentID != 0 { dAtA[i] = 0x18 i++ @@ -3678,19 +3764,19 @@ func (m *UnsafeDeleteNamespaceEntry) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonEventDetails.Size())) - n79, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) + n81, err := m.CommonEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n79 + i += n81 dAtA[i] = 0x12 i++ i = encodeVarintDdlEvents(dAtA, i, uint64(m.CommonSQLEventDetails.Size())) - n80, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) + n82, err := m.CommonSQLEventDetails.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n80 + i += n82 if m.ParentID != 0 { dAtA[i] = 0x18 i++ @@ -3796,6 +3882,27 @@ func (m *AlterDatabaseAddRegion) Size() (n int) { return n } +func (m *AlterDatabaseDropRegion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.CommonEventDetails.Size() + n += 1 + l + sovDdlEvents(uint64(l)) + l = m.CommonSQLEventDetails.Size() + n += 1 + l + sovDdlEvents(uint64(l)) + l = len(m.DatabaseName) + if l > 0 { + n += 1 + l + sovDdlEvents(uint64(l)) + } + l = len(m.RegionName) + if l > 0 { + n += 1 + l + sovDdlEvents(uint64(l)) + } + return n +} + func (m *AlterDatabasePrimaryRegion) Size() (n int) { if m == nil { return 0 @@ -5127,6 +5234,174 @@ func (m *AlterDatabaseAddRegion) Unmarshal(dAtA []byte) error { } return nil } +func (m *AlterDatabaseDropRegion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDdlEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlterDatabaseDropRegion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlterDatabaseDropRegion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonEventDetails", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDdlEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDdlEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommonEventDetails.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonSQLEventDetails", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowDdlEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDdlEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommonSQLEventDetails.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatabaseName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDdlEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDdlEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DatabaseName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDdlEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDdlEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RegionName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDdlEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDdlEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *AlterDatabasePrimaryRegion) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -11800,108 +12075,109 @@ var ( ) func init() { - proto.RegisterFile("util/log/eventpb/ddl_events.proto", fileDescriptor_ddl_events_c32332d2c9b04c72) -} - -var fileDescriptor_ddl_events_c32332d2c9b04c72 = []byte{ - // 1571 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x9b, 0xcd, 0x6f, 0x1b, 0xc5, - 0x1b, 0xc7, 0xb3, 0xeb, 0xbc, 0xd8, 0x8f, 0x5f, 0x9a, 0x6c, 0xda, 0x2a, 0xbf, 0xe8, 0x87, 0x1d, - 0x56, 0x3d, 0x04, 0x41, 0x13, 0xb5, 0x01, 0x2a, 0x15, 0x15, 0xd4, 0xc4, 0x01, 0x19, 0x95, 0xb6, - 0x89, 0xdd, 0x0a, 0x71, 0xb1, 0x36, 0xbb, 0xd3, 0x64, 0xe9, 0x7a, 0x66, 0xb3, 0x3b, 0xb6, 0xf1, - 0x1f, 0x80, 0x84, 0x84, 0x84, 0x10, 0x42, 0x5c, 0xb8, 0x70, 0x40, 0xaa, 0xc4, 0x01, 0x24, 0x2e, - 0x48, 0x88, 0x1b, 0x20, 0x7a, 0x00, 0x54, 0xc1, 0xa5, 0x27, 0xab, 0x75, 0xa4, 0x56, 0xaa, 0x80, - 0x03, 0x42, 0xe2, 0x8a, 0x66, 0x66, 0xd7, 0x5e, 0xc7, 0x5e, 0xa7, 0x91, 0xd2, 0x83, 0x37, 0xb9, - 0x39, 0xde, 0xef, 0x33, 0xe3, 0xe7, 0x33, 0x33, 0xcf, 0xf3, 0xcc, 0xcc, 0x06, 0x9e, 0xae, 0x52, - 0xd3, 0x5a, 0xb4, 0xc8, 0xe6, 0x22, 0xaa, 0x21, 0x4c, 0xed, 0x8d, 0x45, 0xc3, 0xb0, 0xca, 0xfc, - 0xb3, 0xbb, 0x60, 0x3b, 0x84, 0x12, 0x65, 0x56, 0x27, 0xfa, 0x4d, 0x87, 0x68, 0xfa, 0xd6, 0x02, - 0x13, 0x2f, 0x58, 0x64, 0x73, 0xc1, 0x13, 0xcf, 0x1e, 0xdf, 0x24, 0x9b, 0x84, 0xcb, 0x16, 0xd9, - 0x27, 0x61, 0x31, 
0xfb, 0x54, 0x4f, 0xa3, 0xc1, 0x06, 0xd5, 0xbf, 0x24, 0xc8, 0xac, 0x38, 0x48, - 0xa3, 0x28, 0xaf, 0x51, 0x6d, 0x43, 0x73, 0x91, 0x52, 0x82, 0x71, 0x9d, 0x54, 0x2a, 0x04, 0xcf, - 0x48, 0x73, 0xd2, 0x7c, 0xf2, 0xec, 0xc2, 0x42, 0x78, 0xa7, 0x0b, 0x2b, 0x5c, 0xb9, 0xca, 0xfe, - 0xca, 0x23, 0xaa, 0x99, 0x96, 0xbb, 0x9c, 0xba, 0xdd, 0xcc, 0x8d, 0xdc, 0x69, 0xe6, 0xa4, 0x47, - 0xcd, 0xdc, 0xc8, 0xba, 0xd7, 0x96, 0xb2, 0x06, 0x31, 0x77, 0xdb, 0x9a, 0x91, 0x79, 0x93, 0x67, - 0xf6, 0x6e, 0xb2, 0xb8, 0x76, 0x69, 0x40, 0xab, 0xac, 0x2d, 0x65, 0x09, 0xd2, 0x86, 0xf7, 0xa3, - 0xcb, 0x58, 0xab, 0xa0, 0x99, 0xd8, 0x9c, 0x34, 0x9f, 0x58, 0xce, 0x3c, 0x6a, 0xe6, 0xe0, 0x39, - 0x52, 0x31, 0x29, 0xaa, 0xd8, 0xb4, 0xb1, 0x9e, 0xf2, 0x45, 0x97, 0xb5, 0x0a, 0x52, 0xbf, 0x96, - 0x21, 0x95, 0x77, 0x88, 0x7d, 0x38, 0xdc, 0x55, 0xf2, 0x70, 0xd2, 0x70, 0x88, 0x6d, 0x23, 0xa3, - 0xec, 0xea, 0x5b, 0xa8, 0xa2, 0x95, 0xc9, 0xc6, 0xdb, 0x48, 0xa7, 0xee, 0xcc, 0xe8, 0x5c, 0xac, - 0x8f, 0xf5, 0x71, 0x4f, 0x5d, 0xe4, 0xe2, 0x2b, 0x42, 0xab, 0xde, 0x92, 0xe1, 0xe4, 0x45, 0x8b, - 0x22, 0xc7, 0xa7, 0x76, 0xd1, 0x30, 0xd6, 0xd1, 0xa6, 0x49, 0x70, 0xc4, 0xf1, 0x2d, 0x42, 0xd2, - 0xe1, 0x7e, 0x0a, 0x93, 0xd1, 0xbe, 0x26, 0x20, 0x24, 0x7c, 0x7a, 0x7d, 0x2b, 0xc3, 0x6c, 0x17, - 0xa9, 0xab, 0x8e, 0x59, 0xd1, 0x9c, 0xc6, 0xa1, 0xa0, 0xf5, 0x32, 0x4c, 0xdb, 0xc2, 0xdd, 0xf2, - 0xde, 0xd4, 0xa6, 0xec, 0x20, 0x19, 0x0e, 0xef, 0x4b, 0x19, 0xfe, 0xd7, 0x05, 0xaf, 0x58, 0x75, - 0x6a, 0x66, 0x4d, 0xb3, 0x5e, 0x23, 0x9a, 0x15, 0x71, 0x76, 0x4b, 0x90, 0x76, 0x3d, 0x6f, 0xcb, - 0x9b, 0x44, 0xb3, 0x42, 0xa8, 0xa5, 0xdc, 0x00, 0x12, 0xf5, 0x0b, 0x19, 0x32, 0xeb, 0x88, 0xf5, - 0x71, 0x48, 0xc2, 0xd9, 0x79, 0x98, 0xc2, 0xa8, 0x5e, 0xee, 0x36, 0xec, 0x4f, 0xea, 0x18, 0x46, - 0xf5, 0x7c, 0x30, 0xf2, 0x7f, 0x25, 0xc3, 0xb1, 0x15, 0x82, 0x6b, 0xc8, 0xa1, 0x25, 0x22, 0xe2, - 0x5b, 0xf4, 0xd7, 0x63, 0x17, 0x2d, 0x5b, 0x73, 0x10, 0xa6, 0x61, 0xeb, 0x31, 0xc0, 0xeb, 0x2a, - 0x17, 0xaa, 0x1f, 0xc8, 0x90, 0x12, 0xc5, 0xc1, 0xb0, 0xe1, 0x5a, 0x84, 0xa4, 0x97, 0xee, 0x06, - 0xc0, 0x02, 0x21, 0xe1, 0xa8, 0x4e, 0xc1, 0x18, 0xa9, 0x63, 0xe4, 0x84, 0xc0, 0x11, 0x0f, 0xd5, - 0x07, 0x12, 0x00, 0x2b, 0x1e, 0xa2, 0x8e, 0x43, 0xfd, 0x4c, 0x86, 0x94, 0x08, 0x2c, 0x91, 0x1f, - 0xf9, 0x17, 0x81, 0x45, 0x8a, 0x72, 0xd0, 0xa8, 0xff, 0x1c, 0x48, 0x63, 0x54, 0x2f, 0x76, 0x10, - 0x3d, 0x90, 0x21, 0x51, 0x44, 0x74, 0xd8, 0xf8, 0x9c, 0x83, 0x63, 0x06, 0x72, 0x75, 0xc7, 0xb4, - 0x29, 0x71, 0x06, 0x31, 0xca, 0x74, 0x64, 0x5d, 0xc1, 0x64, 0x97, 0xf1, 0x80, 0x60, 0xd2, 0x6d, - 0xdf, 0xdd, 0x31, 0x6d, 0xd8, 0x68, 0x66, 0x6c, 0xaf, 0x8e, 0x4b, 0x0d, 0x1b, 0xa9, 0xef, 0xcb, - 0x90, 0x14, 0x51, 0xa8, 0xa4, 0x6d, 0x58, 0x43, 0x94, 0xe1, 0x4e, 0x03, 0x50, 0xf6, 0x8b, 0x07, - 0x51, 0x4e, 0x70, 0xc5, 0x3e, 0x42, 0xd0, 0x2d, 0x19, 0x12, 0x2c, 0x04, 0x45, 0x9b, 0xc5, 0x32, - 0x9c, 0xd0, 0x35, 0x57, 0xd7, 0x0c, 0x54, 0xf6, 0xb7, 0x2f, 0x35, 0x13, 0xd5, 0xc3, 0x76, 0x2d, - 0xd3, 0x9e, 0x38, 0x2f, 0xb4, 0xd7, 0x99, 0x54, 0xfd, 0x54, 0x86, 0xa4, 0x88, 0x61, 0xd1, 0x66, - 0xf5, 0x3c, 0x64, 0xd8, 0xc2, 0x0c, 0x98, 0x84, 0x94, 0x8e, 0x18, 0xd5, 0x4b, 0xbe, 0x95, 0xfa, - 0x50, 0x82, 0x74, 0xc9, 0xa9, 0x62, 0x3d, 0xea, 0xeb, 0x4a, 0x7d, 0x28, 0x03, 0xf0, 0x5d, 0x45, - 0xb4, 0xa7, 0xc1, 0x05, 0x48, 0x56, 0xaa, 0x54, 0xa3, 0x6c, 0xdb, 0x65, 0x1a, 0x7c, 0x0e, 0xa4, - 0x97, 0xff, 0xdf, 0x6a, 0xe6, 0xe0, 0x0d, 0xef, 0xeb, 0x42, 0x7e, 0x77, 0x1a, 0xf4, 0x0d, 0x0a, - 0x46, 0xf8, 0x8a, 0x1b, 0x7b, 0xfc, 0x15, 0xf7, 0x2f, 0xaf, 0xb0, 0x2b, 0x15, 0x84, 0xe9, 0x15, - 0xbc, 0x42, 0xac, 0x6a, 0x05, 0x47, 0x16, 
0xf7, 0x22, 0x24, 0x75, 0xee, 0xe1, 0xc0, 0x93, 0x01, - 0x21, 0xe1, 0x06, 0xf3, 0x30, 0xa1, 0x0b, 0x36, 0x21, 0x79, 0xcf, 0x7f, 0xac, 0x9c, 0x81, 0x14, - 0xae, 0x5a, 0x56, 0xd9, 0x97, 0x8f, 0xcf, 0x49, 0xf3, 0xf1, 0x1e, 0x79, 0x92, 0x69, 0x3c, 0xda, - 0xea, 0xaf, 0x32, 0x4c, 0xb5, 0xc9, 0x1f, 0x92, 0xbd, 0x60, 0x00, 0xe8, 0xe8, 0x81, 0x03, 0xfd, - 0x51, 0x86, 0x4c, 0x1b, 0x68, 0xb4, 0x03, 0xc7, 0x13, 0xe5, 0xf8, 0x4f, 0x90, 0x63, 0x01, 0x1b, - 0xe8, 0x9d, 0xc8, 0x72, 0x3c, 0x0d, 0x60, 0x32, 0x07, 0x07, 0x05, 0x84, 0x04, 0x57, 0x3c, 0xf9, - 0x78, 0xf0, 0x7b, 0xbb, 0x66, 0x3e, 0x62, 0x1e, 0x60, 0xbe, 0x2b, 0x47, 0x8e, 0xed, 0x2f, 0x47, - 0xaa, 0xef, 0xc6, 0x44, 0xed, 0x7d, 0xc4, 0xf4, 0xc0, 0x98, 0x2a, 0x67, 0xc3, 0xea, 0x8e, 0x71, - 0x56, 0x77, 0xf4, 0xaf, 0x33, 0x7e, 0xf3, 0x2b, 0xba, 0xa3, 0x81, 0x38, 0xb8, 0xc9, 0xfd, 0x8d, - 0x0c, 0x20, 0x42, 0x06, 0x83, 0x3c, 0x3c, 0x50, 0x9f, 0x85, 0x04, 0x9b, 0x30, 0x83, 0x98, 0xc6, - 0x99, 0xe0, 0xf1, 0xf7, 0xd8, 0x0c, 0x3c, 0x6f, 0x72, 0xbb, 0x8a, 0x9c, 0x46, 0x48, 0x74, 0xe6, - 0x9d, 0xae, 0x31, 0x81, 0xfa, 0xb9, 0x0c, 0x71, 0x36, 0x3f, 0x23, 0xcc, 0xed, 0x20, 0xf6, 0xe3, - 0x1f, 0xcb, 0xfe, 0x55, 0x73, 0x11, 0x6d, 0x57, 0x11, 0xd6, 0x87, 0xab, 0x40, 0x75, 0xbd, 0x1f, - 0x3d, 0xb0, 0x40, 0xf5, 0x45, 0xfb, 0x38, 0xd0, 0xf9, 0x43, 0x12, 0x17, 0xd2, 0x87, 0x03, 0x8a, - 0xfa, 0xa7, 0x04, 0x69, 0x1e, 0xbb, 0x0f, 0x89, 0xbf, 0x3b, 0x12, 0x64, 0xbd, 0x1e, 0xf8, 0x49, - 0xf1, 0xca, 0x96, 0x86, 0x37, 0x51, 0xb0, 0x2b, 0x16, 0xb8, 0x4d, 0xec, 0x52, 0x8d, 0xb5, 0x6b, - 0x1a, 0x9c, 0xc2, 0x98, 0x08, 0xdc, 0x05, 0xef, 0xeb, 0xde, 0xc0, 0xed, 0x1b, 0x14, 0x0c, 0x65, - 0x05, 0xd2, 0x81, 0x83, 0x55, 0xd3, 0xe0, 0x3e, 0xa7, 0x97, 0xb3, 0xad, 0x66, 0x2e, 0xd5, 0x39, - 0x83, 0xed, 0x69, 0x22, 0xd5, 0x31, 0x2a, 0x18, 0xbb, 0x93, 0x47, 0x6c, 0x9f, 0xc9, 0xe3, 0x3b, - 0x19, 0xa6, 0xd7, 0x51, 0x0d, 0x39, 0x2e, 0x0a, 0xba, 0xf9, 0x84, 0xc6, 0xf6, 0x4d, 0x90, 0x5d, - 0xdd, 0x1b, 0xda, 0xf3, 0x8f, 0x31, 0xb4, 0x21, 0xe0, 0x77, 0xb5, 0x2e, 0xbb, 0x3a, 0x5b, 0xb2, - 0xc8, 0x71, 0x48, 0xe8, 0x92, 0xe5, 0x0f, 0x95, 0x2b, 0x10, 0x77, 0xb7, 0x2d, 0x97, 0x6a, 0xd4, - 0x3f, 0xc3, 0x5e, 0x6a, 0x35, 0x73, 0xf1, 0xe2, 0xda, 0xa5, 0x62, 0xe9, 0x62, 0x69, 0xb5, 0xdb, - 0xe8, 0xef, 0x66, 0xee, 0x84, 0x83, 0x0c, 0x4d, 0xa7, 0xe7, 0x55, 0x4c, 0xb0, 0x8b, 0xb0, 0x6b, - 0x52, 0xb3, 0x86, 0xd4, 0xf5, 0x76, 0x23, 0xea, 0x0f, 0x12, 0x28, 0xaf, 0x9a, 0xd8, 0x74, 0xb7, - 0x86, 0x99, 0x9e, 0xfa, 0xb3, 0x04, 0xb3, 0xbd, 0x6e, 0xac, 0x13, 0xcb, 0xda, 0xd0, 0xf4, 0x9b, - 0x43, 0xe7, 0xce, 0x7b, 0xed, 0x8a, 0xa8, 0xd4, 0xb0, 0xd1, 0x50, 0x65, 0x76, 0xda, 0xb0, 0x07, - 0x9e, 0x05, 0xc7, 0x99, 0xa0, 0x3b, 0x49, 0x8d, 0x0d, 0x4a, 0x52, 0xf7, 0x24, 0x51, 0xe2, 0x0c, - 0x31, 0x88, 0xd8, 0x60, 0x10, 0xea, 0x7d, 0x09, 0x12, 0xe2, 0x98, 0x38, 0xba, 0x3e, 0x7e, 0x22, - 0x03, 0x78, 0x57, 0x22, 0x91, 0x75, 0x52, 0x39, 0x0b, 0x69, 0x7e, 0x1f, 0xb2, 0xc7, 0x12, 0x48, - 0x62, 0x54, 0x2f, 0xf9, 0x60, 0x1e, 0x49, 0x30, 0xe9, 0xd5, 0xa6, 0x2c, 0xa7, 0xb9, 0xd4, 0xd4, - 0xdd, 0xc8, 0x5e, 0x88, 0x7c, 0x14, 0x83, 0x93, 0xd7, 0xb0, 0xab, 0xdd, 0x40, 0xd7, 0x6c, 0x17, - 0x39, 0xb4, 0x53, 0x24, 0x0c, 0x8f, 0xcb, 0xaf, 0xc0, 0xb4, 0xed, 0xa0, 0x9a, 0x49, 0xaa, 0x6e, - 0xe0, 0x4a, 0x3a, 0xc4, 0x77, 0xc5, 0x97, 0x06, 0x3c, 0x7d, 0x41, 0xdc, 0x9a, 0x05, 0x6c, 0xc3, - 0x6f, 0xfd, 0x03, 0x66, 0xa7, 0x60, 0xec, 0x06, 0x71, 0x74, 0x91, 0xf7, 0x7b, 0x0f, 0xe1, 0xc4, - 0x43, 0xe5, 0x0c, 0xa4, 0xf8, 0x87, 0x32, 0x26, 0xd4, 0xd4, 0x11, 0x3f, 0xb1, 0xeb, 0x33, 0x03, - 0xb9, 0xe6, 0x32, 0x97, 0xa8, 0xdf, 0xb7, 0x07, 0x25, 0x8f, 0x2c, 
0x44, 0xd1, 0x30, 0x0e, 0xca, - 0x39, 0x48, 0x88, 0x57, 0x8c, 0x3a, 0x25, 0xe4, 0x2c, 0x2b, 0x8c, 0xc4, 0xeb, 0x44, 0x3d, 0x05, - 0x64, 0x5c, 0x88, 0x0b, 0x86, 0xf2, 0x3a, 0x4c, 0x7a, 0x86, 0xde, 0x6b, 0x18, 0xed, 0x0b, 0xac, - 0xb9, 0x56, 0x33, 0x97, 0x11, 0xf6, 0x22, 0x73, 0xf7, 0xb4, 0x92, 0xb1, 0x83, 0x4f, 0x0d, 0x45, - 0x85, 0x51, 0xbe, 0x0c, 0xfa, 0xe7, 0x33, 0xfe, 0xac, 0x33, 0x8a, 0xe3, 0xfb, 0x19, 0xc5, 0x89, - 0xbd, 0x47, 0xf1, 0x97, 0x51, 0x98, 0x0d, 0x2e, 0x2d, 0xb6, 0xde, 0x5c, 0x5b, 0xd3, 0xd1, 0x2a, - 0xa6, 0x4e, 0xe3, 0x68, 0x24, 0x0f, 0x7c, 0x24, 0x2f, 0x40, 0xb2, 0x1d, 0x07, 0x4c, 0x83, 0x8f, - 0xa7, 0xb7, 0x6f, 0xb9, 0xea, 0x7d, 0xdd, 0xbb, 0x6f, 0xf1, 0x0d, 0x0a, 0x46, 0x67, 0x22, 0x4c, - 0x0c, 0x9a, 0x08, 0x2f, 0xc1, 0xd4, 0x0d, 0xcd, 0xb4, 0x90, 0x51, 0xae, 0x69, 0x96, 0x69, 0xf0, - 0x4d, 0xcf, 0x4c, 0xbc, 0xaf, 0xc5, 0xa4, 0x10, 0x5e, 0x6f, 0xeb, 0x98, 0x71, 0xc7, 0xaa, 0xcc, - 0x37, 0x10, 0xee, 0x4c, 0xa2, 0xaf, 0x4b, 0x93, 0x1d, 0xe1, 0x2a, 0xd7, 0xa9, 0x3f, 0xc5, 0xfc, - 0xf9, 0x24, 0xa2, 0xc2, 0xd1, 0x7c, 0x1a, 0xd2, 0xc8, 0xb0, 0xfc, 0xcc, 0xed, 0xfb, 0xd9, 0x91, - 0xdb, 0xad, 0xac, 0x74, 0xa7, 0x95, 0x95, 0xee, 0xb6, 0xb2, 0xd2, 0xbd, 0x56, 0x56, 0xfa, 0x70, - 0x27, 0x3b, 0x72, 0x67, 0x27, 0x3b, 0x72, 0x77, 0x27, 0x3b, 0xf2, 0xd6, 0x84, 0x47, 0x75, 0x63, - 0x9c, 0xff, 0x6b, 0xc6, 0xd2, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x44, 0x7e, 0x1a, 0xa6, 0x10, - 0x32, 0x00, 0x00, + proto.RegisterFile("util/log/eventpb/ddl_events.proto", fileDescriptor_ddl_events_329fe5af9ff3ec61) +} + +var fileDescriptor_ddl_events_329fe5af9ff3ec61 = []byte{ + // 1586 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x9b, 0xcf, 0x6f, 0x1b, 0x45, + 0x1b, 0xc7, 0xb3, 0xeb, 0xfc, 0xb0, 0x1f, 0xff, 0x68, 0xb2, 0x69, 0xfb, 0xe6, 0x8d, 0xde, 0xd7, + 0xce, 0xbb, 0xea, 0x21, 0xaf, 0xa0, 0x89, 0xda, 0x00, 0x95, 0x8a, 0x0a, 0x6a, 0xe2, 0x80, 0x8c, + 0x4a, 0xdb, 0xc4, 0x6e, 0x85, 0xb8, 0x58, 0x9b, 0xdd, 0x69, 0xb2, 0x74, 0x3d, 0xb3, 0xd9, 0x1d, + 0xdb, 0xf8, 0x0f, 0x40, 0x42, 0x42, 0x42, 0x08, 0x21, 0x2e, 0x5c, 0x38, 0x20, 0x15, 0x71, 0x00, + 0x89, 0x0b, 0x12, 0xe2, 0x06, 0x88, 0x1e, 0x00, 0x55, 0x70, 0xe9, 0xc9, 0x6a, 0x1d, 0xa9, 0x95, + 0x2a, 0xe0, 0x80, 0x90, 0xb8, 0xa2, 0x99, 0xd9, 0xb5, 0xd7, 0xb1, 0xd7, 0x69, 0xa4, 0xf4, 0xe0, + 0x4d, 0x6e, 0x8e, 0xf7, 0xfb, 0xcc, 0xf8, 0xf9, 0xcc, 0xcc, 0xf3, 0x3c, 0x33, 0xb3, 0x81, 0xff, + 0x55, 0xa9, 0x69, 0x2d, 0x5a, 0x64, 0x73, 0x11, 0xd5, 0x10, 0xa6, 0xf6, 0xc6, 0xa2, 0x61, 0x58, + 0x65, 0xfe, 0xd9, 0x5d, 0xb0, 0x1d, 0x42, 0x89, 0x32, 0xab, 0x13, 0xfd, 0xa6, 0x43, 0x34, 0x7d, + 0x6b, 0x81, 0x89, 0x17, 0x2c, 0xb2, 0xb9, 0xe0, 0x89, 0x67, 0x8f, 0x6f, 0x92, 0x4d, 0xc2, 0x65, + 0x8b, 0xec, 0x93, 0xb0, 0x98, 0xfd, 0x6f, 0x4f, 0xa3, 0xc1, 0x06, 0xd5, 0x3f, 0x24, 0xc8, 0xac, + 0x38, 0x48, 0xa3, 0x28, 0xaf, 0x51, 0x6d, 0x43, 0x73, 0x91, 0x52, 0x82, 0x71, 0x9d, 0x54, 0x2a, + 0x04, 0xcf, 0x48, 0x73, 0xd2, 0x7c, 0xf2, 0xec, 0xc2, 0x42, 0x78, 0xa7, 0x0b, 0x2b, 0x5c, 0xb9, + 0xca, 0xfe, 0xca, 0x23, 0xaa, 0x99, 0x96, 0xbb, 0x9c, 0xba, 0xdd, 0xcc, 0x8d, 0xdc, 0x69, 0xe6, + 0xa4, 0x47, 0xcd, 0xdc, 0xc8, 0xba, 0xd7, 0x96, 0xb2, 0x06, 0x31, 0x77, 0xdb, 0x9a, 0x91, 0x79, + 0x93, 0x67, 0xf6, 0x6e, 0xb2, 0xb8, 0x76, 0x69, 0x40, 0xab, 0xac, 0x2d, 0x65, 0x09, 0xd2, 0x86, + 0xf7, 0xa3, 0xcb, 0x58, 0xab, 0xa0, 0x99, 0xd8, 0x9c, 0x34, 0x9f, 0x58, 0xce, 0x3c, 0x6a, 0xe6, + 0xe0, 0x69, 0x52, 0x31, 0x29, 0xaa, 0xd8, 0xb4, 0xb1, 0x9e, 0xf2, 0x45, 0x97, 0xb5, 0x0a, 0x52, + 0xbf, 0x94, 0x21, 0x95, 0x77, 0x88, 0x7d, 0x38, 0xdc, 
0x55, 0xf2, 0x70, 0xd2, 0x70, 0x88, 0x6d, + 0x23, 0xa3, 0xec, 0xea, 0x5b, 0xa8, 0xa2, 0x95, 0xc9, 0xc6, 0x1b, 0x48, 0xa7, 0xee, 0xcc, 0xe8, + 0x5c, 0xac, 0x8f, 0xf5, 0x71, 0x4f, 0x5d, 0xe4, 0xe2, 0x2b, 0x42, 0xab, 0xde, 0x92, 0xe1, 0xe4, + 0x45, 0x8b, 0x22, 0xc7, 0xa7, 0x76, 0xd1, 0x30, 0xd6, 0xd1, 0xa6, 0x49, 0x70, 0xc4, 0xf1, 0x2d, + 0x42, 0xd2, 0xe1, 0x7e, 0x0a, 0x93, 0xd1, 0xbe, 0x26, 0x20, 0x24, 0x7c, 0x7a, 0x7d, 0x2a, 0xc3, + 0xbf, 0xba, 0x48, 0xb1, 0xb9, 0x76, 0x84, 0xaa, 0x1f, 0xaa, 0xaf, 0x65, 0x98, 0xed, 0x42, 0x75, + 0xd5, 0x31, 0x2b, 0x9a, 0xd3, 0x38, 0x14, 0xb4, 0x5e, 0x80, 0x69, 0x5b, 0xb8, 0x5b, 0xde, 0x9b, + 0xda, 0x94, 0x1d, 0x24, 0xc3, 0xe1, 0x7d, 0x2e, 0xc3, 0xbf, 0xbb, 0xe0, 0x15, 0xab, 0x4e, 0xcd, + 0xac, 0x69, 0xd6, 0xcb, 0x44, 0xb3, 0x22, 0xce, 0x6e, 0x09, 0xd2, 0xae, 0xe7, 0x6d, 0x79, 0x93, + 0x68, 0x56, 0x08, 0xb5, 0x94, 0x1b, 0x40, 0xa2, 0x7e, 0x26, 0x43, 0x66, 0x1d, 0xb1, 0x3e, 0x0e, + 0x49, 0xe4, 0x3f, 0x0f, 0x53, 0x18, 0xd5, 0xcb, 0xdd, 0x86, 0xfd, 0x49, 0x1d, 0xc3, 0xa8, 0x9e, + 0x0f, 0x26, 0xc9, 0x2f, 0x64, 0x38, 0xb6, 0x42, 0x70, 0x0d, 0x39, 0xb4, 0x44, 0x44, 0x2a, 0x88, + 0xfe, 0x7a, 0xec, 0xa2, 0x65, 0x6b, 0x0e, 0xc2, 0x34, 0x6c, 0x3d, 0x06, 0x78, 0x5d, 0xe5, 0x42, + 0xf5, 0x5d, 0x19, 0x52, 0xa2, 0x8e, 0x1a, 0x36, 0x5c, 0x8b, 0x90, 0xf4, 0x2a, 0x83, 0x01, 0xb0, + 0x40, 0x48, 0x38, 0xaa, 0x53, 0x30, 0x46, 0xea, 0x18, 0x39, 0x21, 0x70, 0xc4, 0x43, 0xf5, 0x81, + 0x04, 0xc0, 0x72, 0x5f, 0xd4, 0x71, 0xa8, 0x1f, 0xcb, 0x90, 0x12, 0x81, 0x25, 0xf2, 0x23, 0xff, + 0x1c, 0xb0, 0x48, 0x51, 0x0e, 0x1a, 0xf5, 0x9f, 0x03, 0x69, 0x8c, 0xea, 0xc5, 0x0e, 0xa2, 0x07, + 0x32, 0x24, 0x8a, 0x88, 0x0e, 0x1b, 0x9f, 0x73, 0x70, 0xcc, 0x40, 0xae, 0xee, 0x98, 0x36, 0x25, + 0xce, 0x20, 0x46, 0x99, 0x8e, 0xac, 0x2b, 0x98, 0xec, 0x32, 0x1e, 0x10, 0x4c, 0xba, 0xed, 0xbb, + 0x3b, 0xa6, 0x0d, 0x1b, 0xcd, 0x8c, 0xed, 0xd5, 0x71, 0xa9, 0x61, 0x23, 0xf5, 0x1d, 0x19, 0x92, + 0x22, 0x0a, 0x95, 0xb4, 0x0d, 0x6b, 0x88, 0x32, 0xdc, 0x69, 0x00, 0xca, 0x7e, 0xf1, 0x20, 0xca, + 0x09, 0xae, 0xd8, 0x47, 0x08, 0xba, 0x25, 0x43, 0x82, 0x85, 0xa0, 0x68, 0xb3, 0x58, 0x86, 0x13, + 0xba, 0xe6, 0xea, 0x9a, 0x81, 0xca, 0xfe, 0x4e, 0xaf, 0x66, 0xa2, 0x7a, 0xd8, 0x06, 0x6f, 0xda, + 0x13, 0xe7, 0x85, 0xf6, 0x3a, 0x93, 0xaa, 0x1f, 0xc9, 0x90, 0x14, 0x31, 0x2c, 0xda, 0xac, 0x9e, + 0x81, 0x0c, 0x5b, 0x98, 0x01, 0x93, 0x90, 0xd2, 0x11, 0xa3, 0x7a, 0xc9, 0xb7, 0x52, 0x1f, 0x4a, + 0x90, 0x2e, 0x39, 0x55, 0xac, 0x47, 0x7d, 0x5d, 0xa9, 0x0f, 0x65, 0x00, 0xbe, 0xab, 0x88, 0xf6, + 0x34, 0xb8, 0x00, 0xc9, 0x4a, 0x95, 0x6a, 0x94, 0x6d, 0xbb, 0x4c, 0x83, 0xcf, 0x81, 0xf4, 0xf2, + 0x7f, 0x5a, 0xcd, 0x1c, 0xbc, 0xea, 0x7d, 0x5d, 0xc8, 0xef, 0x4e, 0x83, 0xbe, 0x41, 0xc1, 0x08, + 0x5f, 0x71, 0x63, 0x8f, 0xbf, 0xe2, 0xfe, 0xe6, 0x15, 0x76, 0xa5, 0x82, 0x30, 0xbd, 0x82, 0x57, + 0x88, 0x55, 0xad, 0xe0, 0xc8, 0xe2, 0x5e, 0x84, 0xa4, 0xce, 0x3d, 0x1c, 0x78, 0x32, 0x20, 0x24, + 0xdc, 0x60, 0x1e, 0x26, 0x74, 0xc1, 0x26, 0x24, 0xef, 0xf9, 0x8f, 0x95, 0x33, 0x90, 0xc2, 0x55, + 0xcb, 0x2a, 0xfb, 0xf2, 0xf1, 0x39, 0x69, 0x3e, 0xde, 0x23, 0x4f, 0x32, 0x8d, 0x47, 0x5b, 0xfd, + 0x59, 0x86, 0xa9, 0x36, 0xf9, 0x43, 0xb2, 0x17, 0x0c, 0x00, 0x1d, 0x3d, 0x70, 0xa0, 0xdf, 0xcb, + 0x90, 0x69, 0x03, 0x8d, 0x76, 0xe0, 0x78, 0xa2, 0x1c, 0xff, 0x0a, 0x72, 0x2c, 0x60, 0x03, 0xbd, + 0x19, 0x59, 0x8e, 0xa7, 0x01, 0x4c, 0xe6, 0xe0, 0xa0, 0x80, 0x90, 0xe0, 0x8a, 0x27, 0x1f, 0x0f, + 0x7e, 0x6d, 0xd7, 0xcc, 0x47, 0xcc, 0x03, 0xcc, 0x77, 0xe5, 0xc8, 0xb1, 0xfd, 0xe5, 0x48, 0xf5, + 0xad, 0x98, 0xa8, 0xbd, 0x8f, 0x98, 0x1e, 0x18, 0x53, 0xe5, 0x6c, 0x58, 0xdd, 
0x31, 0xce, 0xea, + 0x8e, 0xfe, 0x75, 0xc6, 0x2f, 0x7e, 0x45, 0x77, 0x34, 0x10, 0x07, 0x37, 0xb9, 0xbf, 0x92, 0x01, + 0x44, 0xc8, 0x60, 0x90, 0x87, 0x07, 0xea, 0x53, 0x90, 0x60, 0x13, 0x66, 0x10, 0xd3, 0x38, 0x13, + 0x3c, 0xfe, 0x1e, 0x9b, 0x81, 0xe7, 0x4d, 0x6e, 0x57, 0x91, 0xd3, 0x08, 0x89, 0xce, 0xbc, 0xd3, + 0x35, 0x26, 0x50, 0x3f, 0x91, 0x21, 0xce, 0xe6, 0x67, 0x84, 0xb9, 0x1d, 0xc4, 0x7e, 0xfc, 0x03, + 0xd9, 0xbf, 0x95, 0x2f, 0xa2, 0xed, 0x2a, 0xc2, 0xfa, 0x70, 0x15, 0xa8, 0xae, 0xf7, 0xa3, 0x07, + 0x16, 0xa8, 0xbe, 0x68, 0x1f, 0x07, 0x3a, 0xbf, 0x49, 0xe2, 0xee, 0xfe, 0x70, 0x40, 0x51, 0x7f, + 0x97, 0x20, 0xcd, 0x63, 0xf7, 0x21, 0xf1, 0x77, 0x47, 0x82, 0xac, 0xd7, 0x03, 0x3f, 0x29, 0x5e, + 0xd9, 0xd2, 0xf0, 0x26, 0x0a, 0x76, 0xc5, 0x02, 0xb7, 0x89, 0x5d, 0xaa, 0xb1, 0x76, 0x4d, 0x83, + 0x53, 0x18, 0x13, 0x81, 0xbb, 0xe0, 0x7d, 0xdd, 0x1b, 0xb8, 0x7d, 0x83, 0x82, 0xa1, 0xac, 0x40, + 0x3a, 0x70, 0xb0, 0x6a, 0x1a, 0xdc, 0xe7, 0xf4, 0x72, 0xb6, 0xd5, 0xcc, 0xa5, 0x3a, 0x67, 0xb0, + 0x3d, 0x4d, 0xa4, 0x3a, 0x46, 0x05, 0x63, 0x77, 0xf2, 0x88, 0xed, 0x33, 0x79, 0x7c, 0x23, 0xc3, + 0xf4, 0x3a, 0xaa, 0x21, 0xc7, 0x45, 0x41, 0x37, 0x9f, 0xd0, 0xd8, 0xbe, 0x06, 0xb2, 0xab, 0x7b, + 0x43, 0x7b, 0xfe, 0x31, 0x86, 0x36, 0x04, 0xfc, 0xae, 0xd6, 0x65, 0x57, 0x67, 0x4b, 0x16, 0x39, + 0x0e, 0x09, 0x5d, 0xb2, 0xfc, 0xa1, 0x72, 0x05, 0xe2, 0xee, 0xb6, 0xe5, 0x52, 0x8d, 0xfa, 0x67, + 0xd8, 0x4b, 0xad, 0x66, 0x2e, 0x5e, 0x5c, 0xbb, 0x54, 0x2c, 0x5d, 0x2c, 0xad, 0x76, 0x1b, 0xfd, + 0xd9, 0xcc, 0x9d, 0x70, 0x90, 0xa1, 0xe9, 0xf4, 0xbc, 0x8a, 0x09, 0x76, 0x11, 0x76, 0x4d, 0x6a, + 0xd6, 0x90, 0xba, 0xde, 0x6e, 0x44, 0xfd, 0x4e, 0x02, 0xe5, 0x25, 0x13, 0x9b, 0xee, 0xd6, 0x30, + 0xd3, 0x53, 0x7f, 0x94, 0x60, 0xb6, 0xd7, 0x8d, 0x75, 0x62, 0x59, 0x1b, 0x9a, 0x7e, 0x73, 0xe8, + 0xdc, 0x79, 0xbb, 0x5d, 0x11, 0x95, 0x1a, 0x36, 0x1a, 0xaa, 0xcc, 0x4e, 0x1b, 0xf6, 0xc0, 0xb3, + 0xe0, 0x38, 0x13, 0x74, 0x27, 0xa9, 0xb1, 0x41, 0x49, 0xea, 0x9e, 0x24, 0x4a, 0x9c, 0x21, 0x06, + 0x11, 0x1b, 0x0c, 0x42, 0xbd, 0x2f, 0x41, 0x42, 0x1c, 0x13, 0x47, 0xd7, 0xc7, 0x0f, 0x65, 0x00, + 0xef, 0x4a, 0x24, 0xb2, 0x4e, 0x2a, 0x67, 0x21, 0xcd, 0xef, 0x43, 0xf6, 0x58, 0x02, 0x49, 0x8c, + 0xea, 0x25, 0x1f, 0xcc, 0x23, 0x09, 0x26, 0xbd, 0xda, 0x94, 0xe5, 0x34, 0x97, 0x9a, 0xba, 0x1b, + 0xd9, 0x0b, 0x91, 0xf7, 0x63, 0x70, 0xf2, 0x1a, 0x76, 0xb5, 0x1b, 0xe8, 0x9a, 0xed, 0x22, 0x87, + 0x76, 0x8a, 0x84, 0xe1, 0x71, 0xf9, 0x45, 0x98, 0xb6, 0x1d, 0x54, 0x33, 0x49, 0xd5, 0x0d, 0x5c, + 0x49, 0x87, 0xf8, 0xae, 0xf8, 0xd2, 0x80, 0xa7, 0xcf, 0x8a, 0x5b, 0xb3, 0x80, 0x6d, 0xf8, 0xad, + 0x7f, 0xc0, 0xec, 0x14, 0x8c, 0xdd, 0x20, 0x8e, 0x2e, 0xf2, 0x7e, 0xef, 0x21, 0x9c, 0x78, 0xa8, + 0x9c, 0x81, 0x14, 0xff, 0x50, 0xc6, 0x84, 0x9a, 0x3a, 0xe2, 0x27, 0x76, 0x7d, 0x66, 0x20, 0xd7, + 0x5c, 0xe6, 0x12, 0xf5, 0xdb, 0xf6, 0xa0, 0xe4, 0x91, 0x85, 0x28, 0x1a, 0xc6, 0x41, 0x39, 0x07, + 0x09, 0xf1, 0x8a, 0x51, 0xa7, 0x84, 0x9c, 0x65, 0x85, 0x91, 0x78, 0x9d, 0xa8, 0xa7, 0x80, 0x8c, + 0x0b, 0x71, 0xc1, 0x50, 0x5e, 0x81, 0x49, 0xcf, 0xd0, 0x7b, 0x0d, 0xa3, 0x7d, 0x81, 0x35, 0xd7, + 0x6a, 0xe6, 0x32, 0xc2, 0x5e, 0x64, 0xee, 0x9e, 0x56, 0x32, 0x76, 0xf0, 0xa9, 0xa1, 0xa8, 0x30, + 0xca, 0x97, 0x41, 0xff, 0x7c, 0xc6, 0x9f, 0x75, 0x46, 0x71, 0x7c, 0x3f, 0xa3, 0x38, 0xb1, 0xf7, + 0x28, 0xfe, 0x34, 0x0a, 0xb3, 0xc1, 0xa5, 0xc5, 0xd6, 0x9b, 0x6b, 0x6b, 0x3a, 0x5a, 0xc5, 0xd4, + 0x69, 0x1c, 0x8d, 0xe4, 0x81, 0x8f, 0xe4, 0x05, 0x48, 0xb6, 0xe3, 0x80, 0x69, 0xf0, 0xf1, 0xf4, + 0xf6, 0x2d, 0x57, 0xbd, 0xaf, 0x7b, 0xf7, 0x2d, 0xbe, 0x41, 0xc1, 0xe8, 0x4c, 0x84, 0x89, 0x41, + 0x13, 
0xe1, 0x79, 0x98, 0xba, 0xa1, 0x99, 0x16, 0x32, 0xca, 0x35, 0xcd, 0x32, 0x0d, 0xbe, 0xe9,
+	0x99, 0x89, 0xf7, 0xb5, 0x98, 0x14, 0xc2, 0xeb, 0x6d, 0x1d, 0x33, 0xee, 0x58, 0x95, 0xf9, 0x06,
+	0xc2, 0x9d, 0x49, 0xf4, 0x75, 0x69, 0xb2, 0x23, 0x5c, 0xe5, 0x3a, 0xf5, 0x87, 0x98, 0x3f, 0x9f,
+	0x44, 0x54, 0x38, 0x9a, 0x4f, 0x43, 0x1a, 0x19, 0x96, 0xff, 0x7f, 0xfb, 0x7e, 0x76, 0xe4, 0x76,
+	0x2b, 0x2b, 0xdd, 0x69, 0x65, 0xa5, 0xbb, 0xad, 0xac, 0x74, 0xaf, 0x95, 0x95, 0xde, 0xdb, 0xc9,
+	0x8e, 0xdc, 0xd9, 0xc9, 0x8e, 0xdc, 0xdd, 0xc9, 0x8e, 0xbc, 0x3e, 0xe1, 0x51, 0xdd, 0x18, 0xe7,
+	0xff, 0xc5, 0xb2, 0xf4, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe1, 0x94, 0x9d, 0xd7, 0x3b, 0x33,
+	0x00, 0x00,
 }
diff --git a/pkg/util/log/eventpb/ddl_events.proto b/pkg/util/log/eventpb/ddl_events.proto
index d2884a6ca6b3..fba970451ab4 100644
--- a/pkg/util/log/eventpb/ddl_events.proto
+++ b/pkg/util/log/eventpb/ddl_events.proto
@@ -60,6 +60,16 @@ message AlterDatabaseAddRegion {
   string region_name = 4 [(gogoproto.jsontag) = ",omitempty"];
 }
 
+// AlterDatabaseDropRegion is recorded when a region is dropped from a database.
+message AlterDatabaseDropRegion {
+  CommonEventDetails common = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true];
+  CommonSQLEventDetails sql = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true];
+  // The name of the database.
+  string database_name = 3 [(gogoproto.jsontag) = ",omitempty"];
+  // The region being dropped.
+  string region_name = 4 [(gogoproto.jsontag) = ",omitempty"];
+}
+
 // AlterDatabasePrimaryRegion is recorded when a primary region is added/modified.
 message AlterDatabasePrimaryRegion {
   CommonEventDetails common = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true];
diff --git a/pkg/util/log/eventpb/eventlog_channels_generated.go b/pkg/util/log/eventpb/eventlog_channels_generated.go
index 1887c51a0d78..8eca731b99a5 100644
--- a/pkg/util/log/eventpb/eventlog_channels_generated.go
+++ b/pkg/util/log/eventpb/eventlog_channels_generated.go
@@ -34,6 +34,9 @@ func (m *QueryExecute) LoggingChannel() logpb.Channel { return logpb.Channel_SQL
 // LoggingChannel implements the EventPayload interface.
 func (m *AlterDatabaseAddRegion) LoggingChannel() logpb.Channel { return logpb.Channel_SQL_SCHEMA }
 
+// LoggingChannel implements the EventPayload interface.
+func (m *AlterDatabaseDropRegion) LoggingChannel() logpb.Channel { return logpb.Channel_SQL_SCHEMA }
+
 // LoggingChannel implements the EventPayload interface.
 func (m *AlterDatabasePrimaryRegion) LoggingChannel() logpb.Channel { return logpb.Channel_SQL_SCHEMA }
 
diff --git a/pkg/util/log/eventpb/json_encode_generated.go b/pkg/util/log/eventpb/json_encode_generated.go
index dbe433378af1..439ebf5f0306 100644
--- a/pkg/util/log/eventpb/json_encode_generated.go
+++ b/pkg/util/log/eventpb/json_encode_generated.go
@@ -43,6 +43,40 @@ func (m *AlterDatabaseAddRegion) AppendJSONFields(printComma bool, b redact.Reda
 	return printComma, b
 }
 
+// AppendJSONFields implements the EventPayload interface.
+func (m *AlterDatabaseDropRegion) AppendJSONFields(printComma bool, b redact.RedactableBytes) (bool, redact.RedactableBytes) {
+
+	printComma, b = m.CommonEventDetails.AppendJSONFields(printComma, b)
+
+	printComma, b = m.CommonSQLEventDetails.AppendJSONFields(printComma, b)
+
+	if m.DatabaseName != "" {
+		if printComma {
+			b = append(b, ',')
+		}
+		printComma = true
+		b = append(b, "\"DatabaseName\":\""...)
+		b = append(b, redact.StartMarker()...)
+		b = redact.RedactableBytes(jsonbytes.EncodeString([]byte(b), string(redact.EscapeMarkers([]byte(m.DatabaseName)))))
+		b = append(b, redact.EndMarker()...)
+		b = append(b, '"')
+	}
+
+	if m.RegionName != "" {
+		if printComma {
+			b = append(b, ',')
+		}
+		printComma = true
+		b = append(b, "\"RegionName\":\""...)
+		b = append(b, redact.StartMarker()...)
+		b = redact.RedactableBytes(jsonbytes.EncodeString([]byte(b), string(redact.EscapeMarkers([]byte(m.RegionName)))))
+		b = append(b, redact.EndMarker()...)
+		b = append(b, '"')
+	}
+
+	return printComma, b
+}
+
 // AppendJSONFields implements the EventPayload interface.
 func (m *AlterDatabaseOwner) AppendJSONFields(printComma bool, b redact.RedactableBytes) (bool, redact.RedactableBytes) {