diff --git a/docs/generated/settings/settings.html b/docs/generated/settings/settings.html index 81c75d1ecda7..237467a83db6 100644 --- a/docs/generated/settings/settings.html +++ b/docs/generated/settings/settings.html @@ -100,6 +100,6 @@ trace.debug.enablebooleanfalseif set, traces for recent requests can be seen at https:///debug/requests trace.lightstep.tokenstringif set, traces go to Lightstep using this token trace.zipkin.collectorstringif set, traces go to the given Zipkin instance (example: '127.0.0.1:9411'); ignored if trace.lightstep.token is set -versionversion20.2-18set the active cluster version in the format '.' +versionversion20.2-24set the active cluster version in the format '.' diff --git a/pkg/base/testing_knobs.go b/pkg/base/testing_knobs.go index 1d96537b8d43..04a62ea6ae01 100644 --- a/pkg/base/testing_knobs.go +++ b/pkg/base/testing_knobs.go @@ -36,4 +36,5 @@ type TestingKnobs struct { TenantTestingKnobs ModuleTestingKnobs JobsTestingKnobs ModuleTestingKnobs BackupRestore ModuleTestingKnobs + MigrationManager ModuleTestingKnobs } diff --git a/pkg/ccl/backupccl/backupbase/system_schema.go b/pkg/ccl/backupccl/backupbase/system_schema.go index 4dff80b543e0..486d4e809ce7 100644 --- a/pkg/ccl/backupccl/backupbase/system_schema.go +++ b/pkg/ccl/backupccl/backupbase/system_schema.go @@ -231,6 +231,9 @@ var SystemTableBackupConfiguration = map[string]systemBackupConfiguration{ systemschema.WebSessionsTable.GetName(): { IncludeInClusterBackup: OptOutOfClusterBackup, }, + systemschema.MigrationsTable.GetName(): { + IncludeInClusterBackup: OptOutOfClusterBackup, + }, } // GetSystemTablesToIncludeInClusterBackup returns a set of system table names that diff --git a/pkg/cli/testdata/doctor/testcluster b/pkg/cli/testdata/doctor/testcluster index 6d175ad4183f..2953ab3a8ff3 100644 --- a/pkg/cli/testdata/doctor/testcluster +++ b/pkg/cli/testdata/doctor/testcluster @@ -1,7 +1,7 @@ doctor cluster ---- debug doctor cluster -Examining 34 descriptors and 35 
namespace entries... +Examining 35 descriptors and 36 namespace entries... Table 53: ParentID 50, ParentSchemaID 29, Name 'foo': not being dropped but no namespace entry found Examining 1 running jobs... ERROR: validation failed diff --git a/pkg/cli/testdata/zip/partial1 b/pkg/cli/testdata/zip/partial1 index c1d256f48693..311bb9d521a7 100644 --- a/pkg/cli/testdata/zip/partial1 +++ b/pkg/cli/testdata/zip/partial1 @@ -58,7 +58,7 @@ requesting goroutine files for node 1... writing: debug/nodes/1/goroutines.err.t ^- resulted in ... requesting log file ... requesting log file ... -requesting ranges... 35 found +requesting ranges... 36 found writing: debug/nodes/1/ranges/1.json writing: debug/nodes/1/ranges/2.json writing: debug/nodes/1/ranges/3.json @@ -94,6 +94,7 @@ writing: debug/nodes/1/ranges/32.json writing: debug/nodes/1/ranges/33.json writing: debug/nodes/1/ranges/34.json writing: debug/nodes/1/ranges/35.json +writing: debug/nodes/1/ranges/36.json writing: debug/nodes/2/status.json using SQL connection URL for node 2: postgresql://... retrieving SQL data for crdb_internal.feature_usage... writing: debug/nodes/2/crdb_internal.feature_usage.txt @@ -194,7 +195,7 @@ requesting goroutine files for node 3... writing: debug/nodes/3/goroutines.err.t ^- resulted in ... requesting log file ... requesting log file ... -requesting ranges... 35 found +requesting ranges... 36 found writing: debug/nodes/3/ranges/1.json writing: debug/nodes/3/ranges/2.json writing: debug/nodes/3/ranges/3.json @@ -230,6 +231,7 @@ writing: debug/nodes/3/ranges/32.json writing: debug/nodes/3/ranges/33.json writing: debug/nodes/3/ranges/34.json writing: debug/nodes/3/ranges/35.json +writing: debug/nodes/3/ranges/36.json doctor examining cluster...No problems found! writing: debug/reports/doctor.txt requesting list of SQL databases... 3 found @@ -238,7 +240,7 @@ requesting database details for defaultdb... writing: debug/schema/defaultdb@det requesting database details for postgres... 
writing: debug/schema/postgres@details.json 0 tables found requesting database details for system... writing: debug/schema/system@details.json -29 tables found +30 tables found requesting table details for system.public.namespace... writing: debug/schema/system/public_namespace.json requesting table details for system.public.descriptor... writing: debug/schema/system/public_descriptor.json requesting table details for system.public.users... writing: debug/schema/system/public_users.json @@ -268,5 +270,6 @@ requesting table details for system.public.statement_diagnostics_requests... wri requesting table details for system.public.statement_diagnostics... writing: debug/schema/system/public_statement_diagnostics.json requesting table details for system.public.scheduled_jobs... writing: debug/schema/system/public_scheduled_jobs.json requesting table details for system.public.sqlliveness... writing: debug/schema/system/public_sqlliveness.json +requesting table details for system.public.migrations... writing: debug/schema/system/public_migrations.json writing: debug/pprof-summary.sh writing: debug/hot-ranges.sh diff --git a/pkg/cli/testdata/zip/partial1_excluded b/pkg/cli/testdata/zip/partial1_excluded index f3ed1e74423c..ce647914df19 100644 --- a/pkg/cli/testdata/zip/partial1_excluded +++ b/pkg/cli/testdata/zip/partial1_excluded @@ -58,7 +58,7 @@ requesting goroutine files for node 1... writing: debug/nodes/1/goroutines.err.t ^- resulted in ... requesting log file ... requesting log file ... -requesting ranges... 35 found +requesting ranges... 
36 found writing: debug/nodes/1/ranges/1.json writing: debug/nodes/1/ranges/2.json writing: debug/nodes/1/ranges/3.json @@ -94,6 +94,7 @@ writing: debug/nodes/1/ranges/32.json writing: debug/nodes/1/ranges/33.json writing: debug/nodes/1/ranges/34.json writing: debug/nodes/1/ranges/35.json +writing: debug/nodes/1/ranges/36.json writing: debug/nodes/2.skipped writing: debug/nodes/3/status.json using SQL connection URL for node 3: postgresql://... @@ -125,7 +126,7 @@ requesting goroutine files for node 3... writing: debug/nodes/3/goroutines.err.t ^- resulted in ... requesting log file ... requesting log file ... -requesting ranges... 35 found +requesting ranges... 36 found writing: debug/nodes/3/ranges/1.json writing: debug/nodes/3/ranges/2.json writing: debug/nodes/3/ranges/3.json @@ -161,6 +162,7 @@ writing: debug/nodes/3/ranges/32.json writing: debug/nodes/3/ranges/33.json writing: debug/nodes/3/ranges/34.json writing: debug/nodes/3/ranges/35.json +writing: debug/nodes/3/ranges/36.json doctor examining cluster...No problems found! writing: debug/reports/doctor.txt requesting list of SQL databases... 3 found @@ -169,7 +171,7 @@ requesting database details for defaultdb... writing: debug/schema/defaultdb@det requesting database details for postgres... writing: debug/schema/postgres@details.json 0 tables found requesting database details for system... writing: debug/schema/system@details.json -29 tables found +30 tables found requesting table details for system.public.namespace... writing: debug/schema/system/public_namespace.json requesting table details for system.public.descriptor... writing: debug/schema/system/public_descriptor.json requesting table details for system.public.users... writing: debug/schema/system/public_users.json @@ -199,5 +201,6 @@ requesting table details for system.public.statement_diagnostics_requests... wri requesting table details for system.public.statement_diagnostics... 
writing: debug/schema/system/public_statement_diagnostics.json requesting table details for system.public.scheduled_jobs... writing: debug/schema/system/public_scheduled_jobs.json requesting table details for system.public.sqlliveness... writing: debug/schema/system/public_sqlliveness.json +requesting table details for system.public.migrations... writing: debug/schema/system/public_migrations.json writing: debug/pprof-summary.sh writing: debug/hot-ranges.sh diff --git a/pkg/cli/testdata/zip/partial2 b/pkg/cli/testdata/zip/partial2 index 728b1054bee9..2488a4aa8326 100644 --- a/pkg/cli/testdata/zip/partial2 +++ b/pkg/cli/testdata/zip/partial2 @@ -58,7 +58,7 @@ requesting goroutine files for node 1... writing: debug/nodes/1/goroutines.err.t ^- resulted in ... requesting log file ... requesting log file ... -requesting ranges... 35 found +requesting ranges... 36 found writing: debug/nodes/1/ranges/1.json writing: debug/nodes/1/ranges/2.json writing: debug/nodes/1/ranges/3.json @@ -94,6 +94,7 @@ writing: debug/nodes/1/ranges/32.json writing: debug/nodes/1/ranges/33.json writing: debug/nodes/1/ranges/34.json writing: debug/nodes/1/ranges/35.json +writing: debug/nodes/1/ranges/36.json writing: debug/nodes/3/status.json using SQL connection URL for node 3: postgresql://... retrieving SQL data for crdb_internal.feature_usage... writing: debug/nodes/3/crdb_internal.feature_usage.txt @@ -124,7 +125,7 @@ requesting goroutine files for node 3... writing: debug/nodes/3/goroutines.err.t ^- resulted in ... requesting log file ... requesting log file ... -requesting ranges... 35 found +requesting ranges... 36 found writing: debug/nodes/3/ranges/1.json writing: debug/nodes/3/ranges/2.json writing: debug/nodes/3/ranges/3.json @@ -160,6 +161,7 @@ writing: debug/nodes/3/ranges/32.json writing: debug/nodes/3/ranges/33.json writing: debug/nodes/3/ranges/34.json writing: debug/nodes/3/ranges/35.json +writing: debug/nodes/3/ranges/36.json doctor examining cluster...No problems found! 
writing: debug/reports/doctor.txt requesting list of SQL databases... 3 found @@ -168,7 +170,7 @@ requesting database details for defaultdb... writing: debug/schema/defaultdb@det requesting database details for postgres... writing: debug/schema/postgres@details.json 0 tables found requesting database details for system... writing: debug/schema/system@details.json -29 tables found +30 tables found requesting table details for system.public.namespace... writing: debug/schema/system/public_namespace.json requesting table details for system.public.descriptor... writing: debug/schema/system/public_descriptor.json requesting table details for system.public.users... writing: debug/schema/system/public_users.json @@ -198,5 +200,6 @@ requesting table details for system.public.statement_diagnostics_requests... wri requesting table details for system.public.statement_diagnostics... writing: debug/schema/system/public_statement_diagnostics.json requesting table details for system.public.scheduled_jobs... writing: debug/schema/system/public_scheduled_jobs.json requesting table details for system.public.sqlliveness... writing: debug/schema/system/public_sqlliveness.json +requesting table details for system.public.migrations... writing: debug/schema/system/public_migrations.json writing: debug/pprof-summary.sh writing: debug/hot-ranges.sh diff --git a/pkg/cli/testdata/zip/specialnames b/pkg/cli/testdata/zip/specialnames index 98fbbba190da..3035b5113d8b 100644 --- a/pkg/cli/testdata/zip/specialnames +++ b/pkg/cli/testdata/zip/specialnames @@ -22,7 +22,7 @@ requesting table details for defaultdb.public."../system"... writing: debug/sche requesting database details for postgres... writing: debug/schema/postgres@details.json 0 tables found requesting database details for system... writing: debug/schema/system-1@details.json -29 tables found +30 tables found requesting table details for system.public.namespace... 
writing: debug/schema/system-1/public_namespace.json requesting table details for system.public.descriptor... writing: debug/schema/system-1/public_descriptor.json requesting table details for system.public.users... writing: debug/schema/system-1/public_users.json @@ -52,3 +52,4 @@ requesting table details for system.public.statement_diagnostics_requests... wri requesting table details for system.public.statement_diagnostics... writing: debug/schema/system-1/public_statement_diagnostics.json requesting table details for system.public.scheduled_jobs... writing: debug/schema/system-1/public_scheduled_jobs.json requesting table details for system.public.sqlliveness... writing: debug/schema/system-1/public_sqlliveness.json +requesting table details for system.public.migrations... writing: debug/schema/system-1/public_migrations.json diff --git a/pkg/cli/testdata/zip/testzip b/pkg/cli/testdata/zip/testzip index 410c47f283fc..3b76fdc42544 100644 --- a/pkg/cli/testdata/zip/testzip +++ b/pkg/cli/testdata/zip/testzip @@ -57,7 +57,7 @@ requesting heap profile for node 1... writing: debug/nodes/1/heap.pprof requesting heap files for node 1... ? found requesting goroutine files for node 1... 0 found requesting log file ... -requesting ranges... 35 found +requesting ranges... 36 found writing: debug/nodes/1/ranges/1.json writing: debug/nodes/1/ranges/2.json writing: debug/nodes/1/ranges/3.json @@ -93,6 +93,7 @@ writing: debug/nodes/1/ranges/32.json writing: debug/nodes/1/ranges/33.json writing: debug/nodes/1/ranges/34.json writing: debug/nodes/1/ranges/35.json +writing: debug/nodes/1/ranges/36.json doctor examining cluster...No problems found! writing: debug/reports/doctor.txt requesting list of SQL databases... 3 found @@ -101,7 +102,7 @@ requesting database details for defaultdb... writing: debug/schema/defaultdb@det requesting database details for postgres... writing: debug/schema/postgres@details.json 0 tables found requesting database details for system... 
writing: debug/schema/system@details.json -29 tables found +30 tables found requesting table details for system.public.namespace... writing: debug/schema/system/public_namespace.json requesting table details for system.public.descriptor... writing: debug/schema/system/public_descriptor.json requesting table details for system.public.users... writing: debug/schema/system/public_users.json @@ -131,5 +132,6 @@ requesting table details for system.public.statement_diagnostics_requests... wri requesting table details for system.public.statement_diagnostics... writing: debug/schema/system/public_statement_diagnostics.json requesting table details for system.public.scheduled_jobs... writing: debug/schema/system/public_scheduled_jobs.json requesting table details for system.public.sqlliveness... writing: debug/schema/system/public_sqlliveness.json +requesting table details for system.public.migrations... writing: debug/schema/system/public_migrations.json writing: debug/pprof-summary.sh writing: debug/hot-ranges.sh diff --git a/pkg/clusterversion/cockroach_versions.go b/pkg/clusterversion/cockroach_versions.go index e15974015e4d..4be5ed066e8a 100644 --- a/pkg/clusterversion/cockroach_versions.go +++ b/pkg/clusterversion/cockroach_versions.go @@ -203,6 +203,24 @@ const ( CPutInline // ReplicaVersions enables the versioning of Replica state. ReplicaVersions + // replacedTruncatedAndRangeAppliedStateMigration stands in for + // TruncatedAndRangeAppliedStateMigration which was re-introduced after the + // migration job was introduced. This is necessary because the jobs + // infrastructure used to run this migration in v21.1 and its later alphas + // was introduced after this version was first introduced. Later code in the + // release relies on the job to run the migration but the job relies on + // its startup migrations having been run. Versions associated with long + // running migrations must follow LongRunningMigrations. 
+ replacedTruncatedAndRangeAppliedStateMigration + // replacedPostTruncatedAndRangeAppliedStateMigration is like the above + // version. See its comment. + replacedPostTruncatedAndRangeAppliedStateMigration + // NewSchemaChanger enables the new schema changer. + NewSchemaChanger + // LongRunningMigrations introduces the LongRunningMigrations table and jobs. + // All versions which have a registered long-running migration must have a + // version higher than this version. + LongRunningMigrations // TruncatedAndRangeAppliedStateMigration is part of the migration to stop // using the legacy truncated state within KV. After the migration, we'll be // using the unreplicated truncated state and the RangeAppliedState on all @@ -217,8 +235,6 @@ const ( // using the replicated legacy TruncatedState. It's also used in asserting // that no replicated truncated state representation is found. PostTruncatedAndRangeAppliedStateMigration - // NewSchemaChanger enables the new schema changer. - NewSchemaChanger // Step (1): Add new versions here. ) @@ -350,17 +366,29 @@ var versionsSingleton = keyedVersions([]keyedVersion{ Version: roachpb.Version{Major: 20, Minor: 2, Internal: 12}, }, { - Key: TruncatedAndRangeAppliedStateMigration, + Key: replacedTruncatedAndRangeAppliedStateMigration, Version: roachpb.Version{Major: 20, Minor: 2, Internal: 14}, }, { - Key: PostTruncatedAndRangeAppliedStateMigration, + Key: replacedPostTruncatedAndRangeAppliedStateMigration, Version: roachpb.Version{Major: 20, Minor: 2, Internal: 16}, }, { Key: NewSchemaChanger, Version: roachpb.Version{Major: 20, Minor: 2, Internal: 18}, }, + { + Key: LongRunningMigrations, + Version: roachpb.Version{Major: 20, Minor: 2, Internal: 20}, + }, + { + Key: TruncatedAndRangeAppliedStateMigration, + Version: roachpb.Version{Major: 20, Minor: 2, Internal: 22}, + }, + { + Key: PostTruncatedAndRangeAppliedStateMigration, + Version: roachpb.Version{Major: 20, Minor: 2, Internal: 24}, + }, // Step (2): Add new versions here. 
}) diff --git a/pkg/clusterversion/key_string.go b/pkg/clusterversion/key_string.go index 140c4bc46ddd..4c84f52af97f 100644 --- a/pkg/clusterversion/key_string.go +++ b/pkg/clusterversion/key_string.go @@ -34,14 +34,17 @@ func _() { _ = x[VirtualComputedColumns-23] _ = x[CPutInline-24] _ = x[ReplicaVersions-25] - _ = x[TruncatedAndRangeAppliedStateMigration-26] - _ = x[PostTruncatedAndRangeAppliedStateMigration-27] + _ = x[replacedTruncatedAndRangeAppliedStateMigration-26] + _ = x[replacedPostTruncatedAndRangeAppliedStateMigration-27] _ = x[NewSchemaChanger-28] + _ = x[LongRunningMigrations-29] + _ = x[TruncatedAndRangeAppliedStateMigration-30] + _ = x[PostTruncatedAndRangeAppliedStateMigration-31] } -const _Key_name = "NamespaceTableWithSchemasStart20_2GeospatialTypeEnumsRangefeedLeasesAlterColumnTypeGeneralAlterSystemJobsAddCreatedByColumnsAddScheduledJobsTableUserDefinedSchemasNoOriginFKIndexesNodeMembershipStatusMinPasswordLengthAbortSpanBytesAlterSystemJobsAddSqllivenessColumnsAddNewSystemSqllivenessTableMaterializedViewsBox2DTypeUpdateScheduledJobsSchemaCreateLoginPrivilegeHBAForNonTLSV20_2Start21_1EmptyArraysInInvertedIndexesUniqueWithoutIndexConstraintsVirtualComputedColumnsCPutInlineReplicaVersionsTruncatedAndRangeAppliedStateMigrationPostTruncatedAndRangeAppliedStateMigrationNewSchemaChanger" +const _Key_name = 
"NamespaceTableWithSchemasStart20_2GeospatialTypeEnumsRangefeedLeasesAlterColumnTypeGeneralAlterSystemJobsAddCreatedByColumnsAddScheduledJobsTableUserDefinedSchemasNoOriginFKIndexesNodeMembershipStatusMinPasswordLengthAbortSpanBytesAlterSystemJobsAddSqllivenessColumnsAddNewSystemSqllivenessTableMaterializedViewsBox2DTypeUpdateScheduledJobsSchemaCreateLoginPrivilegeHBAForNonTLSV20_2Start21_1EmptyArraysInInvertedIndexesUniqueWithoutIndexConstraintsVirtualComputedColumnsCPutInlineReplicaVersionsreplacedTruncatedAndRangeAppliedStateMigrationreplacedPostTruncatedAndRangeAppliedStateMigrationNewSchemaChangerLongRunningMigrationsTruncatedAndRangeAppliedStateMigrationPostTruncatedAndRangeAppliedStateMigration" -var _Key_index = [...]uint16{0, 25, 34, 48, 53, 68, 90, 124, 145, 163, 180, 200, 217, 231, 295, 312, 321, 346, 366, 378, 383, 392, 420, 449, 471, 481, 496, 534, 576, 592} +var _Key_index = [...]uint16{0, 25, 34, 48, 53, 68, 90, 124, 145, 163, 180, 200, 217, 231, 295, 312, 321, 346, 366, 378, 383, 392, 420, 449, 471, 481, 496, 542, 592, 608, 629, 667, 709} func (i Key) String() string { if i < 0 || i >= Key(len(_Key_index)-1) { diff --git a/pkg/jobs/adopt.go b/pkg/jobs/adopt.go index c2fde4654f0f..fe84b8dfefb9 100644 --- a/pkg/jobs/adopt.go +++ b/pkg/jobs/adopt.go @@ -25,13 +25,22 @@ import ( "github.com/cockroachdb/errors" ) -const claimableStatusTupleString = `(` + - `'` + string(StatusRunning) + `', ` + - `'` + string(StatusPending) + `', ` + - `'` + string(StatusCancelRequested) + `', ` + - `'` + string(StatusPauseRequested) + `', ` + - `'` + string(StatusReverting) + `'` + - `)` +const ( + claimableStatusList = `'` + string(StatusRunning) + `', ` + + `'` + string(StatusPending) + `', ` + + `'` + string(StatusCancelRequested) + `', ` + + `'` + string(StatusPauseRequested) + `', ` + + `'` + string(StatusReverting) + `'` + + claimableStatusTupleString = `(` + claimableStatusList + `)` + + nonTerminalStatusList = claimableStatusList + `, ` + + `'` + 
string(StatusPaused) + `'` + + // NonTerminalStatusTupleString is a sql tuple corresponding to statuses of + // non-terminal jobs. + NonTerminalStatusTupleString = `(` + nonTerminalStatusList + `)` +) // claimJobs places a claim with the given SessionID to job rows that are // available. diff --git a/pkg/jobs/jobspb/BUILD.bazel b/pkg/jobs/jobspb/BUILD.bazel index 99e4d9544f56..be82a240d4e7 100644 --- a/pkg/jobs/jobspb/BUILD.bazel +++ b/pkg/jobs/jobspb/BUILD.bazel @@ -24,6 +24,7 @@ proto_library( strip_import_prefix = "/pkg", visibility = ["//visibility:public"], deps = [ + "//pkg/clusterversion:clusterversion_proto", "//pkg/roachpb:roachpb_proto", "//pkg/sql/catalog/descpb:descpb_proto", "//pkg/sql/schemachanger/scpb:scpb_proto", @@ -42,6 +43,7 @@ go_proto_library( visibility = ["//visibility:public"], deps = [ "//pkg/ccl/streamingccl", # keep + "//pkg/clusterversion", "//pkg/roachpb", "//pkg/security", # keep "//pkg/sql/catalog/descpb", diff --git a/pkg/jobs/jobspb/jobs.pb.go b/pkg/jobs/jobspb/jobs.pb.go index 02d2aa9d7d31..abe83fa22ccf 100644 --- a/pkg/jobs/jobspb/jobs.pb.go +++ b/pkg/jobs/jobspb/jobs.pb.go @@ -6,6 +6,7 @@ package jobspb import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import clusterversion "github.com/cockroachdb/cockroach/pkg/clusterversion" import errorspb "github.com/cockroachdb/errors/errorspb" import roachpb "github.com/cockroachdb/cockroach/pkg/roachpb" import descpb "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -57,7 +58,7 @@ func (x EncryptionMode) String() string { return proto.EnumName(EncryptionMode_name, int32(x)) } func (EncryptionMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{0} + return fileDescriptor_jobs_cbbe675afce7b718, []int{0} } type Status int32 @@ -86,7 +87,7 @@ func (x Status) String() string { return proto.EnumName(Status_name, int32(x)) } func (Status) EnumDescriptor() ([]byte, []int) { - return 
fileDescriptor_jobs_8b84a7e749431ded, []int{1} + return fileDescriptor_jobs_cbbe675afce7b718, []int{1} } type Type int32 @@ -106,6 +107,7 @@ const ( TypeTypeSchemaChange Type = 9 TypeStreamIngestion Type = 10 TypeNewSchemaChange Type = 11 + TypeMigration Type = 12 ) var Type_name = map[int32]string{ @@ -121,6 +123,7 @@ var Type_name = map[int32]string{ 9: "TYPEDESC_SCHEMA_CHANGE", 10: "STREAM_INGESTION", 11: "NEW_SCHEMA_CHANGE", + 12: "MIGRATION", } var Type_value = map[string]int32{ "UNSPECIFIED": 0, @@ -135,10 +138,11 @@ var Type_value = map[string]int32{ "TYPEDESC_SCHEMA_CHANGE": 9, "STREAM_INGESTION": 10, "NEW_SCHEMA_CHANGE": 11, + "MIGRATION": 12, } func (Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{2} + return fileDescriptor_jobs_cbbe675afce7b718, []int{2} } type EncryptionInfo_Scheme int32 @@ -158,7 +162,7 @@ func (x EncryptionInfo_Scheme) String() string { return proto.EnumName(EncryptionInfo_Scheme_name, int32(x)) } func (EncryptionInfo_Scheme) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{2, 0} + return fileDescriptor_jobs_cbbe675afce7b718, []int{2, 0} } type SchemaChangeGCProgress_Status int32 @@ -188,7 +192,7 @@ func (x SchemaChangeGCProgress_Status) String() string { return proto.EnumName(SchemaChangeGCProgress_Status_name, int32(x)) } func (SchemaChangeGCProgress_Status) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{22, 0} + return fileDescriptor_jobs_cbbe675afce7b718, []int{22, 0} } type Lease struct { @@ -202,7 +206,7 @@ func (m *Lease) Reset() { *m = Lease{} } func (m *Lease) String() string { return proto.CompactTextString(m) } func (*Lease) ProtoMessage() {} func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{0} + return fileDescriptor_jobs_cbbe675afce7b718, []int{0} } func (m *Lease) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +247,7 @@ func (m 
*BackupEncryptionOptions) Reset() { *m = BackupEncryptionOptions func (m *BackupEncryptionOptions) String() string { return proto.CompactTextString(m) } func (*BackupEncryptionOptions) ProtoMessage() {} func (*BackupEncryptionOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{1} + return fileDescriptor_jobs_cbbe675afce7b718, []int{1} } func (m *BackupEncryptionOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -277,7 +281,7 @@ func (m *BackupEncryptionOptions_KMSInfo) Reset() { *m = BackupEncryptio func (m *BackupEncryptionOptions_KMSInfo) String() string { return proto.CompactTextString(m) } func (*BackupEncryptionOptions_KMSInfo) ProtoMessage() {} func (*BackupEncryptionOptions_KMSInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{1, 0} + return fileDescriptor_jobs_cbbe675afce7b718, []int{1, 0} } func (m *BackupEncryptionOptions_KMSInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -317,7 +321,7 @@ func (m *EncryptionInfo) Reset() { *m = EncryptionInfo{} } func (m *EncryptionInfo) String() string { return proto.CompactTextString(m) } func (*EncryptionInfo) ProtoMessage() {} func (*EncryptionInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{2} + return fileDescriptor_jobs_cbbe675afce7b718, []int{2} } func (m *EncryptionInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +364,7 @@ func (m *StreamIngestionDetails) Reset() { *m = StreamIngestionDetails{} func (m *StreamIngestionDetails) String() string { return proto.CompactTextString(m) } func (*StreamIngestionDetails) ProtoMessage() {} func (*StreamIngestionDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{3} + return fileDescriptor_jobs_cbbe675afce7b718, []int{3} } func (m *StreamIngestionDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -392,7 +396,7 @@ func (m *StreamIngestionProgress) 
Reset() { *m = StreamIngestionProgress func (m *StreamIngestionProgress) String() string { return proto.CompactTextString(m) } func (*StreamIngestionProgress) ProtoMessage() {} func (*StreamIngestionProgress) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{4} + return fileDescriptor_jobs_cbbe675afce7b718, []int{4} } func (m *StreamIngestionProgress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -447,7 +451,7 @@ func (m *BackupDetails) Reset() { *m = BackupDetails{} } func (m *BackupDetails) String() string { return proto.CompactTextString(m) } func (*BackupDetails) ProtoMessage() {} func (*BackupDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{5} + return fileDescriptor_jobs_cbbe675afce7b718, []int{5} } func (m *BackupDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -479,7 +483,7 @@ func (m *BackupProgress) Reset() { *m = BackupProgress{} } func (m *BackupProgress) String() string { return proto.CompactTextString(m) } func (*BackupProgress) ProtoMessage() {} func (*BackupProgress) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{6} + return fileDescriptor_jobs_cbbe675afce7b718, []int{6} } func (m *BackupProgress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -541,7 +545,7 @@ func (m *RestoreDetails) Reset() { *m = RestoreDetails{} } func (m *RestoreDetails) String() string { return proto.CompactTextString(m) } func (*RestoreDetails) ProtoMessage() {} func (*RestoreDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{7} + return fileDescriptor_jobs_cbbe675afce7b718, []int{7} } func (m *RestoreDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +582,7 @@ func (m *RestoreDetails_DescriptorRewrite) Reset() { *m = RestoreDetails func (m *RestoreDetails_DescriptorRewrite) String() string { return proto.CompactTextString(m) } func 
(*RestoreDetails_DescriptorRewrite) ProtoMessage() {} func (*RestoreDetails_DescriptorRewrite) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{7, 0} + return fileDescriptor_jobs_cbbe675afce7b718, []int{7, 0} } func (m *RestoreDetails_DescriptorRewrite) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -611,7 +615,7 @@ func (m *RestoreDetails_BackupLocalityInfo) Reset() { *m = RestoreDetail func (m *RestoreDetails_BackupLocalityInfo) String() string { return proto.CompactTextString(m) } func (*RestoreDetails_BackupLocalityInfo) ProtoMessage() {} func (*RestoreDetails_BackupLocalityInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{7, 1} + return fileDescriptor_jobs_cbbe675afce7b718, []int{7, 1} } func (m *RestoreDetails_BackupLocalityInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -644,7 +648,7 @@ func (m *RestoreProgress) Reset() { *m = RestoreProgress{} } func (m *RestoreProgress) String() string { return proto.CompactTextString(m) } func (*RestoreProgress) ProtoMessage() {} func (*RestoreProgress) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{8} + return fileDescriptor_jobs_cbbe675afce7b718, []int{8} } func (m *RestoreProgress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -705,7 +709,7 @@ func (m *ImportDetails) Reset() { *m = ImportDetails{} } func (m *ImportDetails) String() string { return proto.CompactTextString(m) } func (*ImportDetails) ProtoMessage() {} func (*ImportDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{9} + return fileDescriptor_jobs_cbbe675afce7b718, []int{9} } func (m *ImportDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -743,7 +747,7 @@ func (m *ImportDetails_Table) Reset() { *m = ImportDetails_Table{} } func (m *ImportDetails_Table) String() string { return proto.CompactTextString(m) } func (*ImportDetails_Table) ProtoMessage() {} 
func (*ImportDetails_Table) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{9, 0} + return fileDescriptor_jobs_cbbe675afce7b718, []int{9, 0} } func (m *ImportDetails_Table) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -785,7 +789,7 @@ func (m *SequenceValChunk) Reset() { *m = SequenceValChunk{} } func (m *SequenceValChunk) String() string { return proto.CompactTextString(m) } func (*SequenceValChunk) ProtoMessage() {} func (*SequenceValChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{10} + return fileDescriptor_jobs_cbbe675afce7b718, []int{10} } func (m *SequenceValChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -821,7 +825,7 @@ func (m *SequenceDetails) Reset() { *m = SequenceDetails{} } func (m *SequenceDetails) String() string { return proto.CompactTextString(m) } func (*SequenceDetails) ProtoMessage() {} func (*SequenceDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{11} + return fileDescriptor_jobs_cbbe675afce7b718, []int{11} } func (m *SequenceDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -856,7 +860,7 @@ func (m *SequenceDetails_SequenceChunks) Reset() { *m = SequenceDetails_ func (m *SequenceDetails_SequenceChunks) String() string { return proto.CompactTextString(m) } func (*SequenceDetails_SequenceChunks) ProtoMessage() {} func (*SequenceDetails_SequenceChunks) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{11, 0} + return fileDescriptor_jobs_cbbe675afce7b718, []int{11, 0} } func (m *SequenceDetails_SequenceChunks) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -902,7 +906,7 @@ func (m *ImportProgress) Reset() { *m = ImportProgress{} } func (m *ImportProgress) String() string { return proto.CompactTextString(m) } func (*ImportProgress) ProtoMessage() {} func (*ImportProgress) Descriptor() ([]byte, []int) { - return 
fileDescriptor_jobs_8b84a7e749431ded, []int{12} + return fileDescriptor_jobs_cbbe675afce7b718, []int{12} } func (m *ImportProgress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -945,7 +949,7 @@ func (m *TypeSchemaChangeDetails) Reset() { *m = TypeSchemaChangeDetails func (m *TypeSchemaChangeDetails) String() string { return proto.CompactTextString(m) } func (*TypeSchemaChangeDetails) ProtoMessage() {} func (*TypeSchemaChangeDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{13} + return fileDescriptor_jobs_cbbe675afce7b718, []int{13} } func (m *TypeSchemaChangeDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -978,7 +982,7 @@ func (m *TypeSchemaChangeProgress) Reset() { *m = TypeSchemaChangeProgre func (m *TypeSchemaChangeProgress) String() string { return proto.CompactTextString(m) } func (*TypeSchemaChangeProgress) ProtoMessage() {} func (*TypeSchemaChangeProgress) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{14} + return fileDescriptor_jobs_cbbe675afce7b718, []int{14} } func (m *TypeSchemaChangeProgress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1012,7 +1016,7 @@ func (m *NewSchemaChangeDetails) Reset() { *m = NewSchemaChangeDetails{} func (m *NewSchemaChangeDetails) String() string { return proto.CompactTextString(m) } func (*NewSchemaChangeDetails) ProtoMessage() {} func (*NewSchemaChangeDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{15} + return fileDescriptor_jobs_cbbe675afce7b718, []int{15} } func (m *NewSchemaChangeDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1046,7 +1050,7 @@ func (m *NewSchemaChangeProgress) Reset() { *m = NewSchemaChangeProgress func (m *NewSchemaChangeProgress) String() string { return proto.CompactTextString(m) } func (*NewSchemaChangeProgress) ProtoMessage() {} func (*NewSchemaChangeProgress) Descriptor() ([]byte, []int) { - return 
fileDescriptor_jobs_8b84a7e749431ded, []int{16} + return fileDescriptor_jobs_cbbe675afce7b718, []int{16} } func (m *NewSchemaChangeProgress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1079,7 +1083,7 @@ func (m *ResumeSpanList) Reset() { *m = ResumeSpanList{} } func (m *ResumeSpanList) String() string { return proto.CompactTextString(m) } func (*ResumeSpanList) ProtoMessage() {} func (*ResumeSpanList) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{17} + return fileDescriptor_jobs_cbbe675afce7b718, []int{17} } func (m *ResumeSpanList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1114,7 +1118,7 @@ func (m *DroppedTableDetails) Reset() { *m = DroppedTableDetails{} } func (m *DroppedTableDetails) String() string { return proto.CompactTextString(m) } func (*DroppedTableDetails) ProtoMessage() {} func (*DroppedTableDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{18} + return fileDescriptor_jobs_cbbe675afce7b718, []int{18} } func (m *DroppedTableDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1177,7 +1181,7 @@ func (m *SchemaChangeGCDetails) Reset() { *m = SchemaChangeGCDetails{} } func (m *SchemaChangeGCDetails) String() string { return proto.CompactTextString(m) } func (*SchemaChangeGCDetails) ProtoMessage() {} func (*SchemaChangeGCDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{19} + return fileDescriptor_jobs_cbbe675afce7b718, []int{19} } func (m *SchemaChangeGCDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1211,7 +1215,7 @@ func (m *SchemaChangeGCDetails_DroppedIndex) Reset() { *m = SchemaChange func (m *SchemaChangeGCDetails_DroppedIndex) String() string { return proto.CompactTextString(m) } func (*SchemaChangeGCDetails_DroppedIndex) ProtoMessage() {} func (*SchemaChangeGCDetails_DroppedIndex) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, 
[]int{19, 0} + return fileDescriptor_jobs_cbbe675afce7b718, []int{19, 0} } func (m *SchemaChangeGCDetails_DroppedIndex) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1245,7 +1249,7 @@ func (m *SchemaChangeGCDetails_DroppedID) Reset() { *m = SchemaChangeGCD func (m *SchemaChangeGCDetails_DroppedID) String() string { return proto.CompactTextString(m) } func (*SchemaChangeGCDetails_DroppedID) ProtoMessage() {} func (*SchemaChangeGCDetails_DroppedID) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{19, 1} + return fileDescriptor_jobs_cbbe675afce7b718, []int{19, 1} } func (m *SchemaChangeGCDetails_DroppedID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1279,7 +1283,7 @@ func (m *SchemaChangeGCDetails_DroppedTenant) Reset() { *m = SchemaChang func (m *SchemaChangeGCDetails_DroppedTenant) String() string { return proto.CompactTextString(m) } func (*SchemaChangeGCDetails_DroppedTenant) ProtoMessage() {} func (*SchemaChangeGCDetails_DroppedTenant) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{19, 2} + return fileDescriptor_jobs_cbbe675afce7b718, []int{19, 2} } func (m *SchemaChangeGCDetails_DroppedTenant) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1343,7 +1347,7 @@ func (m *SchemaChangeDetails) Reset() { *m = SchemaChangeDetails{} } func (m *SchemaChangeDetails) String() string { return proto.CompactTextString(m) } func (*SchemaChangeDetails) ProtoMessage() {} func (*SchemaChangeDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{20} + return fileDescriptor_jobs_cbbe675afce7b718, []int{20} } func (m *SchemaChangeDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1375,7 +1379,7 @@ func (m *SchemaChangeProgress) Reset() { *m = SchemaChangeProgress{} } func (m *SchemaChangeProgress) String() string { return proto.CompactTextString(m) } func (*SchemaChangeProgress) ProtoMessage() {} func (*SchemaChangeProgress) 
Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{21} + return fileDescriptor_jobs_cbbe675afce7b718, []int{21} } func (m *SchemaChangeProgress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1413,7 +1417,7 @@ func (m *SchemaChangeGCProgress) Reset() { *m = SchemaChangeGCProgress{} func (m *SchemaChangeGCProgress) String() string { return proto.CompactTextString(m) } func (*SchemaChangeGCProgress) ProtoMessage() {} func (*SchemaChangeGCProgress) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{22} + return fileDescriptor_jobs_cbbe675afce7b718, []int{22} } func (m *SchemaChangeGCProgress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1447,7 +1451,7 @@ func (m *SchemaChangeGCProgress_IndexProgress) Reset() { *m = SchemaChan func (m *SchemaChangeGCProgress_IndexProgress) String() string { return proto.CompactTextString(m) } func (*SchemaChangeGCProgress_IndexProgress) ProtoMessage() {} func (*SchemaChangeGCProgress_IndexProgress) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{22, 0} + return fileDescriptor_jobs_cbbe675afce7b718, []int{22, 0} } func (m *SchemaChangeGCProgress_IndexProgress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1481,7 +1485,7 @@ func (m *SchemaChangeGCProgress_TableProgress) Reset() { *m = SchemaChan func (m *SchemaChangeGCProgress_TableProgress) String() string { return proto.CompactTextString(m) } func (*SchemaChangeGCProgress_TableProgress) ProtoMessage() {} func (*SchemaChangeGCProgress_TableProgress) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{22, 1} + return fileDescriptor_jobs_cbbe675afce7b718, []int{22, 1} } func (m *SchemaChangeGCProgress_TableProgress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1514,7 +1518,7 @@ func (m *SchemaChangeGCProgress_TenantProgress) Reset() { *m = SchemaCha func (m *SchemaChangeGCProgress_TenantProgress) String() 
string { return proto.CompactTextString(m) } func (*SchemaChangeGCProgress_TenantProgress) ProtoMessage() {} func (*SchemaChangeGCProgress_TenantProgress) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{22, 2} + return fileDescriptor_jobs_cbbe675afce7b718, []int{22, 2} } func (m *SchemaChangeGCProgress_TenantProgress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1547,7 +1551,7 @@ func (m *ChangefeedTarget) Reset() { *m = ChangefeedTarget{} } func (m *ChangefeedTarget) String() string { return proto.CompactTextString(m) } func (*ChangefeedTarget) ProtoMessage() {} func (*ChangefeedTarget) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{23} + return fileDescriptor_jobs_cbbe675afce7b718, []int{23} } func (m *ChangefeedTarget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1600,7 +1604,7 @@ func (m *ChangefeedDetails) Reset() { *m = ChangefeedDetails{} } func (m *ChangefeedDetails) String() string { return proto.CompactTextString(m) } func (*ChangefeedDetails) ProtoMessage() {} func (*ChangefeedDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{24} + return fileDescriptor_jobs_cbbe675afce7b718, []int{24} } func (m *ChangefeedDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1635,7 +1639,7 @@ func (m *ResolvedSpan) Reset() { *m = ResolvedSpan{} } func (m *ResolvedSpan) String() string { return proto.CompactTextString(m) } func (*ResolvedSpan) ProtoMessage() {} func (*ResolvedSpan) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{25} + return fileDescriptor_jobs_cbbe675afce7b718, []int{25} } func (m *ResolvedSpan) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1678,7 +1682,7 @@ func (m *ChangefeedProgress) Reset() { *m = ChangefeedProgress{} } func (m *ChangefeedProgress) String() string { return proto.CompactTextString(m) } func (*ChangefeedProgress) ProtoMessage() {} func 
(*ChangefeedProgress) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{26} + return fileDescriptor_jobs_cbbe675afce7b718, []int{26} } func (m *ChangefeedProgress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1722,7 +1726,7 @@ func (m *CreateStatsDetails) Reset() { *m = CreateStatsDetails{} } func (m *CreateStatsDetails) String() string { return proto.CompactTextString(m) } func (*CreateStatsDetails) ProtoMessage() {} func (*CreateStatsDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{27} + return fileDescriptor_jobs_cbbe675afce7b718, []int{27} } func (m *CreateStatsDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1763,7 +1767,7 @@ func (m *CreateStatsDetails_ColStat) Reset() { *m = CreateStatsDetails_C func (m *CreateStatsDetails_ColStat) String() string { return proto.CompactTextString(m) } func (*CreateStatsDetails_ColStat) ProtoMessage() {} func (*CreateStatsDetails_ColStat) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{27, 0} + return fileDescriptor_jobs_cbbe675afce7b718, []int{27, 0} } func (m *CreateStatsDetails_ColStat) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1795,7 +1799,7 @@ func (m *CreateStatsProgress) Reset() { *m = CreateStatsProgress{} } func (m *CreateStatsProgress) String() string { return proto.CompactTextString(m) } func (*CreateStatsProgress) ProtoMessage() {} func (*CreateStatsProgress) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{28} + return fileDescriptor_jobs_cbbe675afce7b718, []int{28} } func (m *CreateStatsProgress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1820,6 +1824,71 @@ func (m *CreateStatsProgress) XXX_DiscardUnknown() { var xxx_messageInfo_CreateStatsProgress proto.InternalMessageInfo +type MigrationDetails struct { + ClusterVersion *clusterversion.ClusterVersion 
`protobuf:"bytes,1,opt,name=cluster_version,json=clusterVersion,proto3" json:"cluster_version,omitempty"` +} + +func (m *MigrationDetails) Reset() { *m = MigrationDetails{} } +func (m *MigrationDetails) String() string { return proto.CompactTextString(m) } +func (*MigrationDetails) ProtoMessage() {} +func (*MigrationDetails) Descriptor() ([]byte, []int) { + return fileDescriptor_jobs_cbbe675afce7b718, []int{29} +} +func (m *MigrationDetails) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MigrationDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (dst *MigrationDetails) XXX_Merge(src proto.Message) { + xxx_messageInfo_MigrationDetails.Merge(dst, src) +} +func (m *MigrationDetails) XXX_Size() int { + return m.Size() +} +func (m *MigrationDetails) XXX_DiscardUnknown() { + xxx_messageInfo_MigrationDetails.DiscardUnknown(m) +} + +var xxx_messageInfo_MigrationDetails proto.InternalMessageInfo + +type MigrationProgress struct { +} + +func (m *MigrationProgress) Reset() { *m = MigrationProgress{} } +func (m *MigrationProgress) String() string { return proto.CompactTextString(m) } +func (*MigrationProgress) ProtoMessage() {} +func (*MigrationProgress) Descriptor() ([]byte, []int) { + return fileDescriptor_jobs_cbbe675afce7b718, []int{30} +} +func (m *MigrationProgress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MigrationProgress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (dst *MigrationProgress) XXX_Merge(src proto.Message) { + xxx_messageInfo_MigrationProgress.Merge(dst, src) +} +func (m *MigrationProgress) XXX_Size() int { + return m.Size() +} +func (m *MigrationProgress) XXX_DiscardUnknown() { + xxx_messageInfo_MigrationProgress.DiscardUnknown(m) +} + +var 
xxx_messageInfo_MigrationProgress proto.InternalMessageInfo + type Payload struct { Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` // If empty, the description is assumed to be the statement. @@ -1857,6 +1926,7 @@ type Payload struct { // *Payload_TypeSchemaChange // *Payload_StreamIngestion // *Payload_NewSchemaChange + // *Payload_Migration Details isPayload_Details `protobuf_oneof:"details"` } @@ -1864,7 +1934,7 @@ func (m *Payload) Reset() { *m = Payload{} } func (m *Payload) String() string { return proto.CompactTextString(m) } func (*Payload) ProtoMessage() {} func (*Payload) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{29} + return fileDescriptor_jobs_cbbe675afce7b718, []int{31} } func (m *Payload) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1925,6 +1995,9 @@ type Payload_StreamIngestion struct { type Payload_NewSchemaChange struct { NewSchemaChange *NewSchemaChangeDetails `protobuf:"bytes,24,opt,name=newSchemaChange,proto3,oneof"` } +type Payload_Migration struct { + Migration *MigrationDetails `protobuf:"bytes,25,opt,name=migration,proto3,oneof"` +} func (*Payload_Backup) isPayload_Details() {} func (*Payload_Restore) isPayload_Details() {} @@ -1936,6 +2009,7 @@ func (*Payload_SchemaChangeGC) isPayload_Details() {} func (*Payload_TypeSchemaChange) isPayload_Details() {} func (*Payload_StreamIngestion) isPayload_Details() {} func (*Payload_NewSchemaChange) isPayload_Details() {} +func (*Payload_Migration) isPayload_Details() {} func (m *Payload) GetDetails() isPayload_Details { if m != nil { @@ -2014,6 +2088,13 @@ func (m *Payload) GetNewSchemaChange() *NewSchemaChangeDetails { return nil } +func (m *Payload) GetMigration() *MigrationDetails { + if x, ok := m.GetDetails().(*Payload_Migration); ok { + return x.Migration + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. 
func (*Payload) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _Payload_OneofMarshaler, _Payload_OneofUnmarshaler, _Payload_OneofSizer, []interface{}{ @@ -2027,6 +2108,7 @@ func (*Payload) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error (*Payload_TypeSchemaChange)(nil), (*Payload_StreamIngestion)(nil), (*Payload_NewSchemaChange)(nil), + (*Payload_Migration)(nil), } } @@ -2084,6 +2166,11 @@ func _Payload_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { if err := b.EncodeMessage(x.NewSchemaChange); err != nil { return err } + case *Payload_Migration: + _ = b.EncodeVarint(25<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Migration); err != nil { + return err + } case nil: default: return fmt.Errorf("Payload.Details has unexpected type %T", x) @@ -2174,6 +2261,14 @@ func _Payload_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer err := b.DecodeMessage(msg) m.Details = &Payload_NewSchemaChange{msg} return true, err + case 25: // details.migration + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(MigrationDetails) + err := b.DecodeMessage(msg) + m.Details = &Payload_Migration{msg} + return true, err default: return false, nil } @@ -2233,6 +2328,11 @@ func _Payload_OneofSizer(msg proto.Message) (n int) { n += 2 // tag and wire n += proto.SizeVarint(uint64(s)) n += s + case *Payload_Migration: + s := proto.Size(x.Migration) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -2258,6 +2358,7 @@ type Progress struct { // *Progress_TypeSchemaChange // *Progress_StreamIngest // *Progress_NewSchemaChange + // *Progress_Migration Details isProgress_Details `protobuf_oneof:"details"` } @@ -2265,7 +2366,7 @@ func (m *Progress) Reset() { *m = 
Progress{} } func (m *Progress) String() string { return proto.CompactTextString(m) } func (*Progress) ProtoMessage() {} func (*Progress) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{30} + return fileDescriptor_jobs_cbbe675afce7b718, []int{32} } func (m *Progress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2337,6 +2438,9 @@ type Progress_StreamIngest struct { type Progress_NewSchemaChange struct { NewSchemaChange *NewSchemaChangeProgress `protobuf:"bytes,19,opt,name=newSchemaChange,proto3,oneof"` } +type Progress_Migration struct { + Migration *MigrationProgress `protobuf:"bytes,20,opt,name=migration,proto3,oneof"` +} func (*Progress_FractionCompleted) isProgress_Progress() {} func (*Progress_HighWater) isProgress_Progress() {} @@ -2350,6 +2454,7 @@ func (*Progress_SchemaChangeGC) isProgress_Details() {} func (*Progress_TypeSchemaChange) isProgress_Details() {} func (*Progress_StreamIngest) isProgress_Details() {} func (*Progress_NewSchemaChange) isProgress_Details() {} +func (*Progress_Migration) isProgress_Details() {} func (m *Progress) GetProgress() isProgress_Progress { if m != nil { @@ -2448,6 +2553,13 @@ func (m *Progress) GetNewSchemaChange() *NewSchemaChangeProgress { return nil } +func (m *Progress) GetMigration() *MigrationProgress { + if x, ok := m.GetDetails().(*Progress_Migration); ok { + return x.Migration + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. 
func (*Progress) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _Progress_OneofMarshaler, _Progress_OneofUnmarshaler, _Progress_OneofSizer, []interface{}{ @@ -2463,6 +2575,7 @@ func (*Progress) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) erro (*Progress_TypeSchemaChange)(nil), (*Progress_StreamIngest)(nil), (*Progress_NewSchemaChange)(nil), + (*Progress_Migration)(nil), } } @@ -2534,6 +2647,11 @@ func _Progress_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { if err := b.EncodeMessage(x.NewSchemaChange); err != nil { return err } + case *Progress_Migration: + _ = b.EncodeVarint(20<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Migration); err != nil { + return err + } case nil: default: return fmt.Errorf("Progress.Details has unexpected type %T", x) @@ -2639,6 +2757,14 @@ func _Progress_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffe err := b.DecodeMessage(msg) m.Details = &Progress_NewSchemaChange{msg} return true, err + case 20: // details.migration + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(MigrationProgress) + err := b.DecodeMessage(msg) + m.Details = &Progress_Migration{msg} + return true, err default: return false, nil } @@ -2712,6 +2838,11 @@ func _Progress_OneofSizer(msg proto.Message) (n int) { n += 2 // tag and wire n += proto.SizeVarint(uint64(s)) n += s + case *Progress_Migration: + s := proto.Size(x.Migration) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -2731,7 +2862,7 @@ func (m *Job) Reset() { *m = Job{} } func (m *Job) String() string { return proto.CompactTextString(m) } func (*Job) ProtoMessage() {} func (*Job) Descriptor() ([]byte, []int) { - return fileDescriptor_jobs_8b84a7e749431ded, []int{31} + 
return fileDescriptor_jobs_cbbe675afce7b718, []int{33} } func (m *Job) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2805,6 +2936,8 @@ func init() { proto.RegisterType((*CreateStatsDetails)(nil), "cockroach.sql.jobs.jobspb.CreateStatsDetails") proto.RegisterType((*CreateStatsDetails_ColStat)(nil), "cockroach.sql.jobs.jobspb.CreateStatsDetails.ColStat") proto.RegisterType((*CreateStatsProgress)(nil), "cockroach.sql.jobs.jobspb.CreateStatsProgress") + proto.RegisterType((*MigrationDetails)(nil), "cockroach.sql.jobs.jobspb.MigrationDetails") + proto.RegisterType((*MigrationProgress)(nil), "cockroach.sql.jobs.jobspb.MigrationProgress") proto.RegisterType((*Payload)(nil), "cockroach.sql.jobs.jobspb.Payload") proto.RegisterType((*Progress)(nil), "cockroach.sql.jobs.jobspb.Progress") proto.RegisterType((*Job)(nil), "cockroach.sql.jobs.jobspb.Job") @@ -4934,6 +5067,52 @@ func (m *CreateStatsProgress) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *MigrationDetails) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MigrationDetails) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ClusterVersion != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintJobs(dAtA, i, uint64(m.ClusterVersion.Size())) + n39, err := m.ClusterVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + return i, nil +} + +func (m *MigrationProgress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MigrationProgress) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + func (m *Payload) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4972,21 +5151,21 @@ func (m 
*Payload) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintJobs(dAtA, i, uint64(m.FinishedMicros)) } if len(m.DescriptorIDs) > 0 { - dAtA40 := make([]byte, len(m.DescriptorIDs)*10) - var j39 int + dAtA41 := make([]byte, len(m.DescriptorIDs)*10) + var j40 int for _, num := range m.DescriptorIDs { for num >= 1<<7 { - dAtA40[j39] = uint8(uint64(num)&0x7f | 0x80) + dAtA41[j40] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j39++ + j40++ } - dAtA40[j39] = uint8(num) - j39++ + dAtA41[j40] = uint8(num) + j40++ } dAtA[i] = 0x32 i++ - i = encodeVarintJobs(dAtA, i, uint64(j39)) - i += copy(dAtA[i:], dAtA40[:j39]) + i = encodeVarintJobs(dAtA, i, uint64(j40)) + i += copy(dAtA[i:], dAtA41[:j40]) } if len(m.Error) > 0 { dAtA[i] = 0x42 @@ -4998,18 +5177,18 @@ func (m *Payload) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x4a i++ i = encodeVarintJobs(dAtA, i, uint64(m.Lease.Size())) - n41, err := m.Lease.MarshalTo(dAtA[i:]) + n42, err := m.Lease.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n42 } if m.Details != nil { - nn42, err := m.Details.MarshalTo(dAtA[i:]) + nn43, err := m.Details.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += nn42 + i += nn43 } if len(m.Statement) > 0 { dAtA[i] = 0x82 @@ -5053,11 +5232,11 @@ func (m *Payload) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintJobs(dAtA, i, uint64(m.FinalResumeError.Size())) - n43, err := m.FinalResumeError.MarshalTo(dAtA[i:]) + n44, err := m.FinalResumeError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n44 } if m.Noncancelable { dAtA[i] = 0xa0 @@ -5080,11 +5259,11 @@ func (m *Payload_Backup) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintJobs(dAtA, i, uint64(m.Backup.Size())) - n44, err := m.Backup.MarshalTo(dAtA[i:]) + n45, err := m.Backup.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n44 + i += n45 } return i, nil } @@ -5094,11 +5273,11 @@ func (m *Payload_Restore) MarshalTo(dAtA []byte) (int, 
error) { dAtA[i] = 0x5a i++ i = encodeVarintJobs(dAtA, i, uint64(m.Restore.Size())) - n45, err := m.Restore.MarshalTo(dAtA[i:]) + n46, err := m.Restore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n45 + i += n46 } return i, nil } @@ -5108,11 +5287,11 @@ func (m *Payload_SchemaChange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x62 i++ i = encodeVarintJobs(dAtA, i, uint64(m.SchemaChange.Size())) - n46, err := m.SchemaChange.MarshalTo(dAtA[i:]) + n47, err := m.SchemaChange.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n46 + i += n47 } return i, nil } @@ -5122,11 +5301,11 @@ func (m *Payload_Import) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x6a i++ i = encodeVarintJobs(dAtA, i, uint64(m.Import.Size())) - n47, err := m.Import.MarshalTo(dAtA[i:]) + n48, err := m.Import.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n47 + i += n48 } return i, nil } @@ -5136,11 +5315,11 @@ func (m *Payload_Changefeed) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintJobs(dAtA, i, uint64(m.Changefeed.Size())) - n48, err := m.Changefeed.MarshalTo(dAtA[i:]) + n49, err := m.Changefeed.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 + i += n49 } return i, nil } @@ -5150,11 +5329,11 @@ func (m *Payload_CreateStats) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x7a i++ i = encodeVarintJobs(dAtA, i, uint64(m.CreateStats.Size())) - n49, err := m.CreateStats.MarshalTo(dAtA[i:]) + n50, err := m.CreateStats.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n50 } return i, nil } @@ -5166,11 +5345,11 @@ func (m *Payload_SchemaChangeGC) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintJobs(dAtA, i, uint64(m.SchemaChangeGC.Size())) - n50, err := m.SchemaChangeGC.MarshalTo(dAtA[i:]) + n51, err := m.SchemaChangeGC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n51 } return i, nil } @@ -5182,11 +5361,11 @@ func (m *Payload_TypeSchemaChange) 
MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintJobs(dAtA, i, uint64(m.TypeSchemaChange.Size())) - n51, err := m.TypeSchemaChange.MarshalTo(dAtA[i:]) + n52, err := m.TypeSchemaChange.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n52 } return i, nil } @@ -5198,11 +5377,11 @@ func (m *Payload_StreamIngestion) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintJobs(dAtA, i, uint64(m.StreamIngestion.Size())) - n52, err := m.StreamIngestion.MarshalTo(dAtA[i:]) + n53, err := m.StreamIngestion.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n53 } return i, nil } @@ -5214,11 +5393,27 @@ func (m *Payload_NewSchemaChange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintJobs(dAtA, i, uint64(m.NewSchemaChange.Size())) - n53, err := m.NewSchemaChange.MarshalTo(dAtA[i:]) + n54, err := m.NewSchemaChange.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n54 + } + return i, nil +} +func (m *Payload_Migration) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Migration != nil { + dAtA[i] = 0xca + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintJobs(dAtA, i, uint64(m.Migration.Size())) + n55, err := m.Migration.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n55 } return i, nil } @@ -5238,11 +5433,11 @@ func (m *Progress) MarshalTo(dAtA []byte) (int, error) { var l int _ = l if m.Progress != nil { - nn54, err := m.Progress.MarshalTo(dAtA[i:]) + nn56, err := m.Progress.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += nn54 + i += nn56 } if m.ModifiedMicros != 0 { dAtA[i] = 0x10 @@ -5256,11 +5451,11 @@ func (m *Progress) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], m.RunningStatus) } if m.Details != nil { - nn55, err := m.Details.MarshalTo(dAtA[i:]) + nn57, err := m.Details.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += nn55 + i += nn57 } return i, nil } @@ -5279,11 +5474,11 @@ func (m 
*Progress_HighWater) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintJobs(dAtA, i, uint64(m.HighWater.Size())) - n56, err := m.HighWater.MarshalTo(dAtA[i:]) + n58, err := m.HighWater.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n56 + i += n58 } return i, nil } @@ -5293,11 +5488,11 @@ func (m *Progress_Backup) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintJobs(dAtA, i, uint64(m.Backup.Size())) - n57, err := m.Backup.MarshalTo(dAtA[i:]) + n59, err := m.Backup.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n57 + i += n59 } return i, nil } @@ -5307,11 +5502,11 @@ func (m *Progress_Restore) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x5a i++ i = encodeVarintJobs(dAtA, i, uint64(m.Restore.Size())) - n58, err := m.Restore.MarshalTo(dAtA[i:]) + n60, err := m.Restore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n58 + i += n60 } return i, nil } @@ -5321,11 +5516,11 @@ func (m *Progress_SchemaChange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x62 i++ i = encodeVarintJobs(dAtA, i, uint64(m.SchemaChange.Size())) - n59, err := m.SchemaChange.MarshalTo(dAtA[i:]) + n61, err := m.SchemaChange.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n59 + i += n61 } return i, nil } @@ -5335,11 +5530,11 @@ func (m *Progress_Import) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x6a i++ i = encodeVarintJobs(dAtA, i, uint64(m.Import.Size())) - n60, err := m.Import.MarshalTo(dAtA[i:]) + n62, err := m.Import.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n60 + i += n62 } return i, nil } @@ -5349,11 +5544,11 @@ func (m *Progress_Changefeed) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintJobs(dAtA, i, uint64(m.Changefeed.Size())) - n61, err := m.Changefeed.MarshalTo(dAtA[i:]) + n63, err := m.Changefeed.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n61 + i += n63 } return i, nil } @@ -5363,11 +5558,11 @@ func (m *Progress_CreateStats) 
MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x7a i++ i = encodeVarintJobs(dAtA, i, uint64(m.CreateStats.Size())) - n62, err := m.CreateStats.MarshalTo(dAtA[i:]) + n64, err := m.CreateStats.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n62 + i += n64 } return i, nil } @@ -5379,11 +5574,11 @@ func (m *Progress_SchemaChangeGC) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintJobs(dAtA, i, uint64(m.SchemaChangeGC.Size())) - n63, err := m.SchemaChangeGC.MarshalTo(dAtA[i:]) + n65, err := m.SchemaChangeGC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n63 + i += n65 } return i, nil } @@ -5395,11 +5590,11 @@ func (m *Progress_TypeSchemaChange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintJobs(dAtA, i, uint64(m.TypeSchemaChange.Size())) - n64, err := m.TypeSchemaChange.MarshalTo(dAtA[i:]) + n66, err := m.TypeSchemaChange.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n64 + i += n66 } return i, nil } @@ -5411,11 +5606,11 @@ func (m *Progress_StreamIngest) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintJobs(dAtA, i, uint64(m.StreamIngest.Size())) - n65, err := m.StreamIngest.MarshalTo(dAtA[i:]) + n67, err := m.StreamIngest.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n65 + i += n67 } return i, nil } @@ -5427,11 +5622,27 @@ func (m *Progress_NewSchemaChange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintJobs(dAtA, i, uint64(m.NewSchemaChange.Size())) - n66, err := m.NewSchemaChange.MarshalTo(dAtA[i:]) + n68, err := m.NewSchemaChange.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n66 + i += n68 + } + return i, nil +} +func (m *Progress_Migration) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Migration != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintJobs(dAtA, i, uint64(m.Migration.Size())) + n69, err := m.Migration.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += 
n69 } return i, nil } @@ -5459,21 +5670,21 @@ func (m *Job) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintJobs(dAtA, i, uint64(m.Progress.Size())) - n67, err := m.Progress.MarshalTo(dAtA[i:]) + n70, err := m.Progress.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n67 + i += n70 } if m.Payload != nil { dAtA[i] = 0x1a i++ i = encodeVarintJobs(dAtA, i, uint64(m.Payload.Size())) - n68, err := m.Payload.MarshalTo(dAtA[i:]) + n71, err := m.Payload.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n68 + i += n71 } return i, nil } @@ -6418,6 +6629,28 @@ func (m *CreateStatsProgress) Size() (n int) { return n } +func (m *MigrationDetails) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClusterVersion != nil { + l = m.ClusterVersion.Size() + n += 1 + l + sovJobs(uint64(l)) + } + return n +} + +func (m *MigrationProgress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + func (m *Payload) Size() (n int) { if m == nil { return 0 @@ -6602,6 +6835,18 @@ func (m *Payload_NewSchemaChange) Size() (n int) { } return n } +func (m *Payload_Migration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Migration != nil { + l = m.Migration.Size() + n += 2 + l + sovJobs(uint64(l)) + } + return n +} func (m *Progress) Size() (n int) { if m == nil { return 0 @@ -6765,6 +7010,18 @@ func (m *Progress_NewSchemaChange) Size() (n int) { } return n } +func (m *Progress_Migration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Migration != nil { + l = m.Migration.Size() + n += 2 + l + sovJobs(uint64(l)) + } + return n +} func (m *Job) Size() (n int) { if m == nil { return 0 @@ -13162,6 +13419,139 @@ func (m *CreateStatsProgress) Unmarshal(dAtA []byte) error { } return nil } +func (m *MigrationDetails) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowJobs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MigrationDetails: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MigrationDetails: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowJobs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthJobs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterVersion == nil { + m.ClusterVersion = &clusterversion.ClusterVersion{} + } + if err := m.ClusterVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipJobs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthJobs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MigrationProgress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowJobs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MigrationProgress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MigrationProgress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipJobs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthJobs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Payload) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -13886,6 +14276,38 @@ func (m *Payload) Unmarshal(dAtA []byte) error { } m.Details = &Payload_NewSchemaChange{v} iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Migration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowJobs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthJobs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &MigrationDetails{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Details = &Payload_Migration{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipJobs(dAtA[iNdEx:]) @@ -14347,6 +14769,38 @@ func (m *Progress) Unmarshal(dAtA []byte) error { } m.Details = &Progress_NewSchemaChange{v} iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Migration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowJobs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthJobs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &MigrationProgress{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Details = &Progress_Migration{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipJobs(dAtA[iNdEx:]) @@ -14608,303 +15062,310 @@ var ( ErrIntOverflowJobs = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("jobs/jobspb/jobs.proto", fileDescriptor_jobs_8b84a7e749431ded) } - -var fileDescriptor_jobs_8b84a7e749431ded = []byte{ - // 4717 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x7b, 0x4b, 0x6c, 0x23, 0x47, - 0x7a, 0xbf, 0x9a, 0xa4, 0xc8, 0xe6, 0x47, 0x91, 0x6a, 0x95, 0x34, 0x33, 0x5c, 0xfe, 0x6d, 0x51, - 0x4b, 0xbf, 0x66, 0xc6, 0x36, 0xe5, 0x95, 0xff, 0xeb, 0xb5, 0x27, 0xf6, 0xd8, 0x7c, 0x8d, 0x44, - 0x6a, 0xf4, 0x70, 0x53, 0x1a, 0x3f, 0x36, 0xde, 0x4e, 0xb3, 0xbb, 0x24, 0x75, 0x44, 0x76, 0x73, - 0xba, 0x9a, 0x33, 0xa3, 0x0d, 0x90, 0x04, 0x1b, 0x04, 0x58, 0xcc, 0x29, 0x01, 0x92, 0x5c, 0x92, - 0x01, 0x02, 0x64, 0x17, 0x08, 0x92, 0x00, 0x41, 0x8c, 0x20, 0xc9, 0x21, 0xb7, 0x5c, 0x7c, 0x48, - 0x80, 0xbd, 0x04, 0x30, 0x72, 0xe0, 0x26, 0xf2, 0x25, 0xc7, 0x20, 0x7b, 0x9b, 0x4b, 0x82, 0x7a, - 0x74, 0xb3, 0x49, 0xea, 0x41, 0x8d, 0xec, 0xcd, 0x65, 0x86, 0xfd, 0xd5, 0x57, 0xbf, 0xaa, 0xfa, - 0xea, 0xab, 0xef, 0x55, 0x25, 0xb8, 0xfa, 0xeb, 0x4e, 0x8b, 0x2c, 0xd3, 0x7f, 0xba, 0x2d, 0xf6, - 0x5f, 0xb1, 0xeb, 0x3a, 0x9e, 0x83, 0xbe, 0x65, 0x38, 0xc6, 0xa1, 0xeb, 0xe8, 0xc6, 0x41, 0x91, - 0xdc, 0x6f, 0x17, 0x59, 0x0b, 0xe7, 0xca, 0x5d, 0xc1, 0xae, 0xeb, 0xb8, 0x94, 0x9f, 0xff, 0xe0, - 0x3d, 0x72, 0x0b, 0xfb, 0xce, 0xbe, 0xc3, 0x7e, 0x2e, 0xd3, 0x5f, 0x82, 0x8a, 0x18, 0x46, 0xb7, - 0xb5, 0x6c, 0xea, 0x9e, 0x2e, 0x68, 0x59, 0x9f, 0x66, 0x39, 0xaf, 0xef, 0x39, 0x6e, 0x47, 0xf7, - 0x7c, 
0x8c, 0x17, 0xc8, 0xfd, 0xf6, 0xb2, 0xa1, 0x7b, 0x7a, 0xdb, 0xd9, 0x5f, 0x36, 0x31, 0x31, - 0xba, 0xad, 0x65, 0xe2, 0xb9, 0x3d, 0xc3, 0xeb, 0xb9, 0xd8, 0x14, 0x4c, 0xf9, 0x13, 0x98, 0x3c, - 0x6c, 0xeb, 0xb6, 0xe7, 0xe3, 0xf7, 0x3c, 0xab, 0xbd, 0x7c, 0xd0, 0x36, 0x96, 0x3d, 0xab, 0x83, - 0x89, 0xa7, 0x77, 0xba, 0xa2, 0xe5, 0xdb, 0xb4, 0x2b, 0x31, 0x0e, 0x70, 0x47, 0x37, 0x0e, 0x74, - 0x7b, 0x1f, 0xbb, 0xcb, 0x7c, 0x0c, 0xa3, 0xdb, 0xe2, 0x2c, 0x85, 0xdf, 0x82, 0xe9, 0xbb, 0x58, - 0x27, 0x18, 0x7d, 0x0a, 0x09, 0xdb, 0x31, 0xb1, 0x66, 0x99, 0x59, 0x69, 0x49, 0xba, 0x9e, 0x2e, - 0x97, 0x8e, 0xfb, 0xf9, 0xf8, 0xa6, 0x63, 0xe2, 0x7a, 0xf5, 0x69, 0x3f, 0xff, 0xe6, 0xbe, 0xe5, - 0x1d, 0xf4, 0x5a, 0x45, 0xc3, 0xe9, 0x2c, 0x07, 0xb2, 0x32, 0x5b, 0x83, 0xdf, 0xcb, 0xdd, 0xc3, - 0xfd, 0x65, 0xb1, 0xd2, 0x22, 0xef, 0xa6, 0xc6, 0x29, 0x62, 0xdd, 0x44, 0x0b, 0x30, 0x8d, 0xbb, - 0x8e, 0x71, 0x90, 0x8d, 0x2c, 0x49, 0xd7, 0xa3, 0x2a, 0xff, 0xb8, 0x15, 0xfb, 0xcf, 0x3f, 0xcd, - 0x4b, 0x85, 0x9f, 0x46, 0xe0, 0x5a, 0x59, 0x37, 0x0e, 0x7b, 0xdd, 0x9a, 0x6d, 0xb8, 0x47, 0x5d, - 0xcf, 0x72, 0xec, 0x2d, 0xf6, 0x2f, 0x41, 0x0a, 0x44, 0x0f, 0xf1, 0x11, 0x9b, 0xcf, 0x8c, 0x4a, - 0x7f, 0xa2, 0xf7, 0x20, 0xd6, 0x71, 0x4c, 0xcc, 0x80, 0x32, 0x2b, 0x37, 0x8a, 0xa7, 0x6e, 0x5b, - 0x71, 0x80, 0xb6, 0xe1, 0x98, 0x58, 0x65, 0xdd, 0x50, 0x0b, 0xe4, 0xc3, 0x0e, 0xd1, 0x2c, 0x7b, - 0xcf, 0xc9, 0x46, 0x97, 0xa4, 0xeb, 0xa9, 0x95, 0x5b, 0x67, 0x40, 0x9c, 0x32, 0xad, 0xe2, 0xfa, - 0x46, 0xb3, 0x6e, 0xef, 0x39, 0xe5, 0xd4, 0x71, 0x3f, 0x9f, 0x10, 0x1f, 0x6a, 0xe2, 0xb0, 0x43, - 0xe8, 0x8f, 0xdc, 0x16, 0xf8, 0x34, 0x3a, 0xff, 0x9e, 0x6b, 0xb1, 0xf9, 0x27, 0x55, 0xfa, 0x13, - 0xbd, 0x06, 0x08, 0x73, 0x3c, 0x6c, 0x6a, 0x54, 0x47, 0x34, 0xba, 0xc0, 0x08, 0x5b, 0xa0, 0x12, - 0xb4, 0x54, 0x75, 0x4f, 0x5f, 0xc7, 0x47, 0x5c, 0x42, 0x42, 0x4e, 0xbf, 0x1d, 0x85, 0xcc, 0x60, - 0x2a, 0x0c, 0x7e, 0x0d, 0xe2, 0x6c, 0x73, 0x31, 0x1b, 0x21, 0xb3, 0xf2, 0xc6, 0x44, 0xe2, 0xa0, - 0x5d, 0x8b, 0x4d, 0xd6, 0x4f, 0x15, 0xfd, 
0x11, 0x82, 0x18, 0xd1, 0xdb, 0x9e, 0x98, 0x08, 0xfb, - 0x8d, 0xfe, 0x58, 0x82, 0xa5, 0xd1, 0x19, 0x95, 0x8f, 0xd6, 0x37, 0x9a, 0x1b, 0x3a, 0xf1, 0xb0, - 0xbb, 0x8e, 0x8f, 0xea, 0xd5, 0x6c, 0x74, 0x29, 0x7a, 0x3d, 0xb5, 0xb2, 0x35, 0xf9, 0xc0, 0xb5, - 0x73, 0x10, 0x6b, 0xb6, 0xe7, 0x1e, 0xa9, 0xe7, 0x0e, 0x9c, 0x6b, 0xc2, 0x4b, 0x13, 0x41, 0x85, - 0x75, 0x28, 0xc9, 0x75, 0x68, 0x01, 0xa6, 0x1f, 0xe8, 0xed, 0x1e, 0x16, 0xab, 0xe5, 0x1f, 0xb7, - 0x22, 0x6f, 0x4b, 0x85, 0x6b, 0x10, 0xe7, 0x82, 0x41, 0x69, 0x48, 0x96, 0x6a, 0xcd, 0x95, 0xef, - 0xbe, 0xb5, 0x5a, 0xd9, 0x50, 0xa6, 0xc4, 0x16, 0xfc, 0x8f, 0x04, 0x57, 0x9b, 0x9e, 0x8b, 0xf5, - 0x4e, 0xdd, 0xde, 0xc7, 0x84, 0xae, 0xa9, 0x8a, 0x3d, 0xdd, 0x6a, 0x13, 0x64, 0x43, 0x86, 0xb0, - 0x16, 0x4d, 0x37, 0x4d, 0x17, 0x13, 0xc2, 0x07, 0x2c, 0xaf, 0x3e, 0xed, 0xe7, 0x2b, 0x13, 0x1d, - 0x1d, 0xc3, 0x68, 0x2f, 0x73, 0x08, 0xcb, 0xde, 0x37, 0x8c, 0x76, 0x91, 0x8f, 0x54, 0xe2, 0x70, - 0x6a, 0x9a, 0x84, 0x3f, 0xd1, 0x77, 0x20, 0x46, 0xba, 0xba, 0xcd, 0x96, 0x90, 0x5a, 0xb9, 0x16, - 0x92, 0xbf, 0x7f, 0x04, 0x9b, 0x5d, 0xdd, 0x2e, 0xc7, 0xbe, 0xe8, 0xe7, 0xa7, 0x54, 0xc6, 0x8a, - 0xca, 0x00, 0xc4, 0xd3, 0x5d, 0x4f, 0xa3, 0x56, 0x42, 0x68, 0xff, 0xf3, 0xa1, 0x8e, 0xd4, 0x8a, - 0x14, 0x0f, 0xda, 0x46, 0x71, 0xc7, 0xb7, 0x22, 0xa2, 0x7b, 0x92, 0x75, 0xa3, 0xd4, 0xc2, 0xb7, - 0xe0, 0xda, 0x88, 0x00, 0xb6, 0x5d, 0x67, 0x9f, 0xce, 0xa8, 0xf0, 0x8f, 0x71, 0x48, 0xf3, 0x03, - 0xe3, 0xcb, 0x64, 0x78, 0x40, 0xe9, 0x59, 0x06, 0x44, 0xb7, 0x41, 0xc6, 0xb6, 0xc9, 0x11, 0x22, - 0x93, 0x23, 0x24, 0xb0, 0x6d, 0xb2, 0xfe, 0xdf, 0xe2, 0x27, 0x30, 0xca, 0x36, 0x23, 0x71, 0xdc, - 0xcf, 0x47, 0x77, 0xd5, 0x3a, 0x3f, 0x8a, 0xef, 0x42, 0xce, 0xc4, 0x5d, 0x17, 0x1b, 0x3a, 0x3d, - 0x8b, 0x2d, 0x36, 0x75, 0xad, 0xa3, 0xdb, 0xd6, 0x1e, 0x26, 0x5e, 0x36, 0xc6, 0x74, 0x23, 0x3b, - 0xe0, 0xe0, 0x6b, 0xdb, 0x10, 0xed, 0xe8, 0x77, 0x24, 0x98, 0xef, 0xb9, 0x16, 0xd1, 0x5a, 0x47, - 0x5a, 0xdb, 0x31, 0xf4, 0xb6, 0xe5, 0x1d, 0x69, 0x87, 0x0f, 0xb2, 0xd3, 0xec, 
0x40, 0xdc, 0x3e, - 0xd7, 0xaa, 0x08, 0x21, 0x15, 0x77, 0x5d, 0x8b, 0x94, 0x8f, 0xee, 0x0a, 0x84, 0xf5, 0x07, 0x4c, - 0x69, 0xcb, 0x0b, 0xc7, 0xfd, 0xbc, 0xb2, 0xab, 0xd6, 0xc3, 0x4d, 0xf7, 0x54, 0xa5, 0x37, 0xc2, - 0x8c, 0xf4, 0xc0, 0x9c, 0x58, 0x8e, 0xad, 0x39, 0xdc, 0x3e, 0x65, 0xe3, 0x4c, 0x50, 0x2b, 0x17, - 0xb7, 0x6c, 0xea, 0x1c, 0x1e, 0xb3, 0xc1, 0xbf, 0x2f, 0x41, 0x8e, 0xba, 0x0a, 0x6c, 0x50, 0x31, - 0x05, 0x1e, 0x46, 0x73, 0xb1, 0xe1, 0xb8, 0x66, 0x36, 0x41, 0xe5, 0x54, 0x6e, 0xfe, 0xdb, 0xa4, - 0x1e, 0x82, 0xf9, 0xaa, 0x5e, 0xcf, 0x32, 0x8b, 0xbb, 0xbb, 0xf5, 0xea, 0x71, 0x3f, 0x9f, 0xdd, - 0xf6, 0xc1, 0x83, 0x4d, 0x54, 0x19, 0xb4, 0x9a, 0xed, 0x9e, 0xd2, 0x82, 0xde, 0x86, 0x8c, 0xe1, - 0xb4, 0xdb, 0xd8, 0x60, 0xcb, 0xde, 0x55, 0xeb, 0x59, 0x99, 0x6d, 0xf0, 0xdc, 0x71, 0x3f, 0x9f, - 0xae, 0x04, 0x2d, 0x74, 0xab, 0xd3, 0x46, 0xf8, 0x13, 0xa9, 0x30, 0x1b, 0x12, 0x18, 0xf3, 0x03, - 0x49, 0x26, 0xad, 0x1b, 0x13, 0x9b, 0x30, 0x35, 0x83, 0x87, 0xbe, 0x73, 0x15, 0xb8, 0x72, 0xe2, - 0x2e, 0x9e, 0x67, 0x7a, 0x92, 0x61, 0xd3, 0xa3, 0x40, 0x86, 0x6f, 0x4a, 0x70, 0xa0, 0x3e, 0xcf, - 0x40, 0x46, 0xc5, 0xc4, 0x73, 0x5c, 0xec, 0x9f, 0xa8, 0xcf, 0x25, 0x98, 0xa7, 0x11, 0x80, 0x6b, - 0x75, 0x3d, 0xc7, 0xd5, 0x5c, 0xfc, 0xd0, 0xb5, 0x3c, 0x4c, 0xb2, 0x11, 0xa6, 0x74, 0xa5, 0x33, - 0x96, 0x30, 0x0c, 0x54, 0xac, 0x06, 0x20, 0xaa, 0xc0, 0xe0, 0x7a, 0x77, 0xfb, 0x47, 0x3f, 0xcf, - 0xdf, 0x9a, 0x68, 0x1f, 0xc7, 0x83, 0x92, 0x62, 0xbd, 0xaa, 0x22, 0x73, 0x0c, 0x18, 0x3d, 0x07, - 0x31, 0xaa, 0xb7, 0xcc, 0x55, 0x24, 0xcb, 0xf2, 0x71, 0x3f, 0x1f, 0xa3, 0x9a, 0xad, 0x32, 0xea, - 0xd0, 0x01, 0x8f, 0x3d, 0xc3, 0x01, 0x5f, 0x85, 0x94, 0xa7, 0xb7, 0xda, 0x58, 0xa3, 0x23, 0x13, - 0x71, 0xfc, 0x5e, 0x1e, 0x91, 0x04, 0xb9, 0xdf, 0x6e, 0xe9, 0x04, 0x17, 0x77, 0x28, 0x67, 0x68, - 0xed, 0xe0, 0xf9, 0x04, 0x82, 0x96, 0x21, 0xe5, 0x3c, 0xc0, 0xae, 0x6b, 0x99, 0x58, 0x33, 0x5b, - 0xec, 0x0c, 0x25, 0xcb, 0x99, 0xe3, 0x7e, 0x1e, 0xb6, 0x04, 0xb9, 0x5a, 0x56, 0xc1, 0x67, 0xa9, - 0xb6, 0x90, 0x07, 
0x0b, 0xc2, 0x68, 0x04, 0xe7, 0x9f, 0xe9, 0x53, 0x82, 0x4d, 0xe1, 0xdd, 0xc9, - 0x37, 0x83, 0xef, 0xbb, 0xaf, 0x3c, 0x2c, 0xb2, 0xe0, 0x8b, 0x44, 0xad, 0xb1, 0x16, 0xf4, 0x2a, - 0xcc, 0x75, 0x5d, 0xdc, 0xd5, 0x5d, 0xac, 0x19, 0x4e, 0xa7, 0xdb, 0xc6, 0x1e, 0x36, 0x99, 0xf6, - 0xcb, 0xaa, 0x22, 0x1a, 0x2a, 0x3e, 0x1d, 0xbd, 0x44, 0xbd, 0x92, 0xee, 0xd1, 0x80, 0x87, 0x60, - 0x97, 0x72, 0x26, 0x19, 0x67, 0x9a, 0x51, 0xeb, 0x82, 0x88, 0xde, 0x84, 0x2b, 0x83, 0x7d, 0x23, - 0x5a, 0xb7, 0xd7, 0x6a, 0x5b, 0xe4, 0x00, 0x9b, 0x59, 0x60, 0xdc, 0x0b, 0xa1, 0xc6, 0x6d, 0xbf, - 0x0d, 0x1d, 0x0d, 0xa9, 0xa2, 0x41, 0x05, 0xa3, 0xef, 0xe3, 0x6c, 0x6a, 0x49, 0xba, 0x3e, 0x5d, - 0x5e, 0x7b, 0xda, 0xcf, 0x57, 0x27, 0xd6, 0x23, 0x82, 0x3b, 0xcb, 0x9e, 0x8b, 0x71, 0x48, 0x2d, - 0x2b, 0x02, 0x2f, 0xac, 0x51, 0x3e, 0x0d, 0xa9, 0x00, 0x83, 0x23, 0x98, 0x9d, 0x79, 0x66, 0x6b, - 0x17, 0x42, 0x41, 0x25, 0x48, 0xf0, 0xa0, 0x9a, 0x64, 0xd3, 0x6c, 0x03, 0xbf, 0x7d, 0x9a, 0x0e, - 0x31, 0xae, 0xd0, 0x2e, 0xf9, 0xfd, 0x50, 0x15, 0xc0, 0x3b, 0xea, 0xfa, 0x9a, 0x98, 0x61, 0x28, - 0x2f, 0x9d, 0x86, 0x72, 0xd4, 0x0d, 0x2b, 0x62, 0xd2, 0x13, 0xdf, 0x04, 0x35, 0x60, 0x86, 0x47, - 0xec, 0x02, 0x67, 0x96, 0xe1, 0xbc, 0x72, 0x0a, 0x0e, 0x0b, 0x57, 0xf4, 0x10, 0x52, 0x8a, 0x04, - 0x14, 0x82, 0xb6, 0x21, 0x43, 0x63, 0x4c, 0xca, 0x29, 0xd0, 0x14, 0x86, 0x76, 0xe3, 0x14, 0xb4, - 0xaa, 0x60, 0x0e, 0xe1, 0xa5, 0xcd, 0x10, 0x8d, 0xe4, 0x7e, 0x21, 0xc1, 0xdc, 0x98, 0xf1, 0x40, - 0x3b, 0x10, 0x09, 0xd2, 0x06, 0x6a, 0xd3, 0x23, 0x2c, 0x65, 0xb8, 0x8c, 0x21, 0x89, 0x58, 0x26, - 0xda, 0x87, 0x24, 0x55, 0x67, 0xdb, 0xa3, 0x39, 0x49, 0x84, 0x81, 0x37, 0x8e, 0xfb, 0x79, 0x79, - 0x9b, 0x11, 0x2f, 0x3d, 0x84, 0xcc, 0xc1, 0xeb, 0x26, 0xca, 0x43, 0xca, 0x73, 0x34, 0xfc, 0xc8, - 0x22, 0x9e, 0x65, 0xef, 0xb3, 0x60, 0x41, 0x56, 0xc1, 0x73, 0x6a, 0x82, 0x92, 0xfb, 0x93, 0x08, - 0xa0, 0xf1, 0x53, 0x8a, 0xfe, 0x41, 0x82, 0xe7, 0xfc, 0x18, 0xc0, 0x71, 0xad, 0x7d, 0xcb, 0xd6, - 0xdb, 0x43, 0xc1, 0x80, 0xc4, 0xa4, 0xfd, 0xe9, 0x65, 
0x4c, 0x81, 0x08, 0x10, 0xb6, 0x04, 0xfc, - 0x68, 0xa0, 0xf0, 0x1c, 0xf5, 0xa0, 0x3c, 0x50, 0x18, 0x63, 0xb9, 0xa7, 0x66, 0x7b, 0xa7, 0x74, - 0xce, 0xad, 0xc3, 0xf3, 0x67, 0x02, 0x5f, 0xc4, 0x77, 0xe5, 0x7e, 0x24, 0xc1, 0xb5, 0x53, 0x3c, - 0x4a, 0x18, 0x27, 0xcd, 0x71, 0x3e, 0x0c, 0xe3, 0xa4, 0x56, 0x7e, 0xe5, 0x12, 0x5e, 0x2b, 0x34, - 0x89, 0x46, 0x4c, 0x96, 0x94, 0x48, 0xe1, 0x0d, 0x98, 0x15, 0x9d, 0x7c, 0x3f, 0x8a, 0x9e, 0x07, - 0x38, 0xb0, 0xf6, 0x0f, 0xb4, 0x87, 0xba, 0x87, 0x5d, 0x91, 0x4b, 0x26, 0x29, 0xe5, 0x23, 0x4a, - 0x28, 0xfc, 0xab, 0x0c, 0xe9, 0x7a, 0xa7, 0xeb, 0xb8, 0x9e, 0xef, 0x65, 0xef, 0x42, 0x9c, 0xf9, - 0x05, 0x22, 0xf6, 0xaf, 0x78, 0xc6, 0x0c, 0x87, 0x7a, 0x72, 0xff, 0x22, 0xcc, 0x82, 0xc0, 0x08, - 0xdc, 0x5f, 0xe4, 0x44, 0xf7, 0xf7, 0x1e, 0xc4, 0x79, 0x49, 0x40, 0x04, 0xe4, 0xf9, 0x13, 0x22, - 0xf9, 0xfa, 0xd6, 0x1d, 0xab, 0x8d, 0xef, 0x30, 0x36, 0x1f, 0x9c, 0x77, 0x42, 0x2f, 0x83, 0x4c, - 0x88, 0xa7, 0x11, 0xeb, 0x87, 0xdc, 0x7b, 0x46, 0x79, 0x4e, 0xda, 0x6c, 0xee, 0x34, 0xad, 0x1f, - 0x62, 0x35, 0x41, 0x88, 0x47, 0x7f, 0xa0, 0x1c, 0xc8, 0x0f, 0xf5, 0x76, 0x9b, 0x79, 0xd9, 0x69, - 0x96, 0x83, 0x07, 0xdf, 0xc3, 0xc7, 0x2c, 0xfe, 0xcd, 0x1e, 0x33, 0xe1, 0x30, 0xbb, 0xba, 0x77, - 0xc0, 0x22, 0xc7, 0xa4, 0x0a, 0x9c, 0xb4, 0xad, 0x7b, 0x07, 0x28, 0x0b, 0x09, 0xa2, 0x53, 0xdf, - 0x45, 0xb2, 0xf2, 0x52, 0xf4, 0xfa, 0x8c, 0xea, 0x7f, 0xa2, 0x45, 0x60, 0x9e, 0x97, 0x7f, 0x32, - 0x27, 0x16, 0x55, 0x43, 0x14, 0x26, 0x87, 0x43, 0xab, 0xab, 0xed, 0x1d, 0x12, 0xee, 0xb4, 0x84, - 0x1c, 0x0e, 0xad, 0xee, 0x9d, 0x75, 0xa2, 0x26, 0x68, 0xe3, 0x9d, 0x43, 0x82, 0x5e, 0x81, 0x59, - 0x8b, 0x65, 0x2e, 0x9a, 0x69, 0xb9, 0xd8, 0xf0, 0xda, 0x47, 0xcc, 0x61, 0xc9, 0x6a, 0x86, 0x93, - 0xab, 0x82, 0x8a, 0x6e, 0x80, 0x32, 0xea, 0x66, 0x99, 0xa3, 0x91, 0xd5, 0xd9, 0x11, 0x2f, 0x4b, - 0x59, 0xf9, 0x56, 0x87, 0x1c, 0x67, 0x9a, 0xb3, 0x72, 0xfa, 0xc0, 0x67, 0x16, 0x61, 0xbe, 0xab, - 0xbb, 0x04, 0x6b, 0xad, 0x9e, 0x6d, 0xb6, 0xb1, 0xc6, 0x6d, 0x75, 0x36, 0xc3, 0xb8, 0xe7, 
0x58, - 0x53, 0x99, 0xb5, 0x70, 0xb3, 0x7e, 0x5e, 0xec, 0x7d, 0xf5, 0xff, 0x20, 0xf6, 0xce, 0xfd, 0x34, - 0x02, 0xd3, 0x4c, 0xcf, 0xd1, 0x2d, 0x88, 0xd1, 0x6d, 0x16, 0x99, 0xdd, 0xa4, 0x31, 0x17, 0xeb, - 0x83, 0x10, 0xc4, 0x6c, 0xbd, 0x83, 0xb3, 0x88, 0x29, 0x01, 0xfb, 0x8d, 0xae, 0x41, 0x82, 0xe0, - 0xfb, 0xda, 0x03, 0xbd, 0x9d, 0x9d, 0x67, 0x3b, 0x1c, 0x27, 0xf8, 0xfe, 0x3d, 0xbd, 0x8d, 0xae, - 0x40, 0xdc, 0x22, 0x9a, 0x8d, 0x1f, 0x66, 0x17, 0x98, 0xa4, 0xa6, 0x2d, 0xb2, 0x89, 0x1f, 0x32, - 0xb3, 0xad, 0xbb, 0xfb, 0xd8, 0xd3, 0x0c, 0xa7, 0x4d, 0xb2, 0x57, 0xe8, 0x01, 0xa3, 0x21, 0x1d, - 0x25, 0x55, 0x9c, 0x36, 0x41, 0xff, 0x0f, 0x92, 0x0f, 0x75, 0xa2, 0xe1, 0x4e, 0xd7, 0x3b, 0x62, - 0xc2, 0x92, 0xa9, 0xda, 0x93, 0x1a, 0xfd, 0x6e, 0xc4, 0xe4, 0x88, 0x12, 0x6d, 0xc4, 0xe4, 0xa8, - 0x12, 0x6b, 0xc4, 0xe4, 0x98, 0x32, 0xdd, 0x88, 0xc9, 0xd3, 0x4a, 0xbc, 0x11, 0x93, 0xe3, 0x4a, - 0xa2, 0x11, 0x93, 0x13, 0x8a, 0xdc, 0x88, 0xc9, 0xb2, 0x92, 0x6c, 0xc4, 0xe4, 0xa4, 0x02, 0x8d, - 0x98, 0x0c, 0x4a, 0xaa, 0x11, 0x93, 0x53, 0xca, 0x4c, 0x23, 0x26, 0xcf, 0x28, 0xe9, 0x46, 0x4c, - 0x4e, 0x2b, 0x99, 0x46, 0x4c, 0xce, 0x28, 0xb3, 0x8d, 0x98, 0x3c, 0xab, 0x28, 0x8d, 0x98, 0xac, - 0x28, 0x73, 0x8d, 0x98, 0x3c, 0xa7, 0xa0, 0xc2, 0xe7, 0x12, 0x28, 0x4d, 0x7c, 0xbf, 0x87, 0x6d, - 0x03, 0xdf, 0xd3, 0xdb, 0x95, 0x83, 0x9e, 0x7d, 0x88, 0x5e, 0x86, 0x59, 0x83, 0xfe, 0xd0, 0x78, - 0x62, 0x4c, 0x97, 0x2a, 0xb1, 0xa5, 0xa6, 0x19, 0xb9, 0x49, 0xa9, 0x74, 0xc5, 0xcf, 0x03, 0x08, - 0x3e, 0x7a, 0xb2, 0x79, 0xd5, 0x2c, 0xc9, 0x59, 0xe8, 0x71, 0x1e, 0x81, 0x71, 0x9d, 0x87, 0xcc, - 0x7c, 0x0c, 0xc1, 0xa8, 0xce, 0x43, 0xb4, 0x0c, 0x0b, 0x36, 0x7e, 0xe4, 0x69, 0xa3, 0xcc, 0xcc, - 0x54, 0xa8, 0x73, 0xb4, 0xad, 0x12, 0xee, 0x50, 0xf8, 0x97, 0x08, 0xcc, 0xfa, 0x93, 0xf6, 0xcd, - 0xe1, 0x1e, 0x28, 0x74, 0x5b, 0x2c, 0x53, 0xf3, 0x1c, 0x8e, 0xe4, 0x1b, 0xc6, 0xf7, 0xce, 0x30, - 0x8c, 0x23, 0x28, 0xf4, 0xbb, 0x6e, 0xee, 0x38, 0x6c, 0x38, 0xee, 0x1a, 0xd4, 0x34, 0x09, 0xd3, - 0x72, 0xbb, 0x90, 0xf1, 0x3b, 
0x71, 0x0a, 0xaa, 0x40, 0x7c, 0x68, 0xbc, 0x57, 0x27, 0x18, 0xcf, - 0x17, 0xb5, 0x2a, 0xba, 0xe6, 0x7e, 0x03, 0xd0, 0xf8, 0xd8, 0x61, 0xb7, 0x34, 0xcd, 0xdd, 0xd2, - 0xd6, 0xb0, 0x5b, 0x7a, 0xe7, 0x62, 0x6b, 0x0b, 0x4d, 0x3b, 0x9c, 0xd5, 0xfd, 0x53, 0x04, 0x32, - 0xdc, 0x45, 0x04, 0xee, 0xe8, 0x55, 0x98, 0x63, 0x46, 0xcb, 0xb2, 0xf7, 0xb5, 0xae, 0x20, 0xb2, - 0xf5, 0x45, 0x54, 0xc5, 0x6f, 0x08, 0x98, 0x5f, 0x80, 0xb4, 0x8b, 0x75, 0x73, 0xc0, 0x18, 0x61, - 0x8c, 0x33, 0x94, 0x18, 0x30, 0xbd, 0x04, 0x19, 0xe6, 0x0d, 0x07, 0x5c, 0x51, 0xc6, 0x95, 0x66, - 0xd4, 0x80, 0xad, 0x0c, 0x69, 0xd2, 0xd5, 0xed, 0x01, 0x57, 0x8c, 0x09, 0xf5, 0x9c, 0xda, 0xd1, - 0x0c, 0xed, 0x13, 0xf6, 0xa5, 0x2e, 0x26, 0xbd, 0x0e, 0xd6, 0xba, 0x0e, 0x4f, 0xb6, 0xa2, 0x6a, - 0x92, 0x53, 0xb6, 0x1d, 0x82, 0x76, 0x99, 0xaa, 0x30, 0x59, 0x68, 0x26, 0x17, 0x4e, 0x36, 0xce, - 0x46, 0xb9, 0x39, 0xb9, 0x38, 0xd5, 0x59, 0x32, 0x4c, 0x28, 0xfc, 0x8d, 0x04, 0xd7, 0x68, 0xc0, - 0xcc, 0xad, 0x62, 0x85, 0x55, 0xb2, 0x7d, 0xed, 0xd4, 0x21, 0xc1, 0x82, 0xee, 0x20, 0xfe, 0x5c, - 0x3b, 0xee, 0xe7, 0xe3, 0x94, 0xfb, 0xd2, 0x9e, 0x2b, 0x4e, 0x81, 0xeb, 0x2c, 0x3d, 0xf2, 0x5c, - 0xdd, 0x26, 0x16, 0x4d, 0x14, 0xe8, 0xb6, 0x75, 0x70, 0xa7, 0x85, 0x5d, 0xbe, 0x19, 0x33, 0xea, - 0xc2, 0x50, 0xe3, 0x06, 0x6f, 0x2b, 0xe4, 0x20, 0x3b, 0x3a, 0xe5, 0x20, 0xb3, 0xff, 0x55, 0xb8, - 0xba, 0x89, 0x1f, 0x9e, 0xb4, 0x9a, 0x32, 0x24, 0xb8, 0xfd, 0xf2, 0x55, 0xfe, 0xfa, 0xa8, 0x55, - 0x0d, 0x17, 0xf3, 0x8b, 0x6c, 0xa6, 0x3b, 0xac, 0x83, 0xea, 0x77, 0x2c, 0x7c, 0x0a, 0xd7, 0x46, - 0xd0, 0x83, 0xed, 0x7b, 0x1f, 0xe2, 0x34, 0xf3, 0x13, 0x91, 0x4d, 0x66, 0x3c, 0xab, 0x18, 0x47, - 0x6f, 0x52, 0x7e, 0x55, 0x74, 0x2b, 0xa8, 0xac, 0x24, 0xd1, 0xeb, 0x60, 0xaa, 0x21, 0x77, 0x2d, - 0xe2, 0xa1, 0x0f, 0x60, 0x46, 0x68, 0x04, 0x55, 0x14, 0x7f, 0xda, 0xe7, 0x28, 0x55, 0xca, 0x0d, - 0x40, 0x48, 0xe1, 0x6f, 0x25, 0x98, 0xaf, 0xba, 0x4e, 0xb7, 0x8b, 0x4d, 0xe1, 0x2b, 0xb8, 0x2c, - 0x7c, 0x17, 0x21, 0x85, 0x5c, 0xc4, 0x26, 0x44, 0xea, 0x55, 0x91, 
0x0b, 0xdc, 0xbe, 0x6c, 0x8a, - 0x51, 0xaf, 0xa2, 0x77, 0xb8, 0x40, 0x7a, 0x84, 0xd9, 0xcf, 0xcc, 0x58, 0xd2, 0x37, 0xa4, 0xa6, - 0x8c, 0x51, 0x15, 0x1d, 0x0a, 0x3f, 0x49, 0xc0, 0x95, 0xb0, 0x90, 0x57, 0x2b, 0xfe, 0xc4, 0x3f, - 0x83, 0x84, 0x65, 0x9b, 0xf8, 0x11, 0x9e, 0xc8, 0x4e, 0x9e, 0x04, 0x51, 0x14, 0xf2, 0xa8, 0x53, - 0x18, 0x3f, 0xcd, 0x14, 0x98, 0xe8, 0xe3, 0x20, 0x3c, 0xe5, 0x65, 0x9f, 0x5b, 0xcf, 0x8c, 0x5e, - 0x1d, 0x09, 0x55, 0x87, 0x22, 0x41, 0xe6, 0x50, 0xbe, 0xa1, 0x48, 0xb0, 0x09, 0x73, 0x96, 0xed, - 0x61, 0xb7, 0x8d, 0xf5, 0x07, 0x34, 0xb0, 0xa1, 0xc3, 0x8b, 0xea, 0xcf, 0xa4, 0x61, 0x84, 0x12, - 0x02, 0xe0, 0xe1, 0xc8, 0x67, 0x30, 0x1f, 0x06, 0xf5, 0xb7, 0xe0, 0xec, 0x8a, 0x10, 0x93, 0xf0, - 0x00, 0xd6, 0x2f, 0xbc, 0x84, 0x80, 0xea, 0x42, 0xec, 0xf7, 0x20, 0xce, 0x13, 0x7d, 0x51, 0x5e, - 0xbd, 0xfd, 0xac, 0x62, 0xe7, 0x05, 0x04, 0x55, 0xa0, 0xe5, 0xfe, 0x48, 0x82, 0x99, 0xf0, 0x76, - 0x23, 0x0b, 0x64, 0x36, 0x77, 0xdf, 0xa4, 0x45, 0xcb, 0x9b, 0x34, 0x96, 0x65, 0x8d, 0x6c, 0x0f, - 0xde, 0x7f, 0xe6, 0x3d, 0xe0, 0x10, 0x42, 0x95, 0xea, 0x26, 0x0d, 0x90, 0x4c, 0xd7, 0xe9, 0x0e, - 0xca, 0xeb, 0x51, 0x55, 0xa6, 0x04, 0x1a, 0xf3, 0xe5, 0x7e, 0x13, 0x92, 0x81, 0xa2, 0x84, 0x32, - 0xfc, 0xe8, 0xd7, 0x98, 0xe1, 0x9f, 0x39, 0x7e, 0x15, 0xd2, 0x43, 0x12, 0x43, 0x57, 0x83, 0x39, - 0xc4, 0xca, 0x71, 0x3e, 0x87, 0x73, 0x51, 0x0a, 0x3f, 0x8f, 0xc3, 0xfc, 0x49, 0x96, 0xf6, 0x13, - 0x50, 0x42, 0x76, 0x4b, 0x6b, 0x5b, 0xc4, 0x13, 0xe7, 0xe9, 0xc6, 0xd9, 0x09, 0x69, 0xc8, 0xf8, - 0x09, 0x6d, 0xc9, 0xb8, 0xc3, 0x26, 0xf1, 0xfb, 0x90, 0x31, 0xf9, 0xc4, 0x35, 0x71, 0x50, 0xa3, - 0xe7, 0xe6, 0x91, 0x27, 0x18, 0x40, 0x81, 0x9e, 0x36, 0x43, 0x4d, 0x84, 0xdd, 0x3b, 0xf8, 0xe8, - 0x41, 0x6d, 0xc7, 0x32, 0xd9, 0xe9, 0x49, 0x97, 0x9b, 0xc7, 0xfd, 0xfc, 0x9c, 0xc0, 0xf2, 0x8b, - 0x39, 0x97, 0xde, 0xa9, 0x39, 0x73, 0x04, 0xd0, 0xa4, 0x5e, 0x97, 0xb6, 0xd3, 0x81, 0xa7, 0x07, - 0x5e, 0x97, 0x9e, 0xa3, 0xcb, 0x7b, 0x5d, 0xfa, 0xb3, 0x6e, 0xa2, 0xdf, 0x95, 0x60, 0x8e, 0x57, - 0x76, 
0x3b, 0x3d, 0x4f, 0xe7, 0xe5, 0x7a, 0x3f, 0x3f, 0xfd, 0xe4, 0xb8, 0x9f, 0x9f, 0x65, 0x02, - 0xd9, 0x10, 0x6d, 0x6c, 0xd8, 0xf2, 0xb3, 0x0e, 0x3b, 0x40, 0x11, 0x39, 0x5b, 0x40, 0x30, 0xd1, - 0x3a, 0x64, 0x78, 0xb2, 0xad, 0xd1, 0x74, 0xd3, 0x72, 0x6c, 0x96, 0xb8, 0xa6, 0xcb, 0x2f, 0x3e, - 0xed, 0xe7, 0x97, 0x4e, 0xd0, 0x2c, 0x9e, 0xa7, 0xdf, 0xe3, 0xbc, 0x6a, 0x7a, 0x2f, 0xfc, 0x89, - 0x0c, 0x48, 0x07, 0xaa, 0x71, 0xd4, 0x15, 0x79, 0xee, 0xe5, 0x5d, 0xd9, 0x8c, 0xaf, 0x23, 0x14, - 0x13, 0xed, 0xc3, 0xac, 0x3f, 0x08, 0x77, 0xe8, 0x24, 0x9b, 0xfc, 0x5a, 0x86, 0xf1, 0xd5, 0x9a, - 0xaf, 0x9a, 0x88, 0x92, 0xcb, 0x55, 0x58, 0x38, 0x31, 0xca, 0xf9, 0x32, 0x0e, 0x57, 0x87, 0x0d, - 0x61, 0x10, 0x87, 0x68, 0xa3, 0x1e, 0xf2, 0xfd, 0x89, 0x8d, 0xa9, 0x8f, 0xc1, 0x8d, 0x99, 0xff, - 0x35, 0xea, 0x23, 0x3f, 0x1b, 0xf1, 0x91, 0xcf, 0x80, 0xcf, 0xd4, 0x6b, 0x04, 0xdf, 0x77, 0x94, - 0x1f, 0x07, 0xbe, 0x80, 0x57, 0x6d, 0x3e, 0x78, 0x06, 0x78, 0xd6, 0xdf, 0xff, 0x0c, 0xbc, 0xc1, - 0x3f, 0x4b, 0x90, 0x1e, 0x5a, 0xd9, 0x2f, 0xd3, 0x1d, 0x6c, 0x07, 0xd1, 0x10, 0x7f, 0x5e, 0xf1, - 0xf6, 0xc5, 0x97, 0x35, 0x1c, 0x24, 0xe5, 0xfe, 0x5e, 0x82, 0xf4, 0x90, 0x20, 0xbf, 0x21, 0x47, - 0xf2, 0xf5, 0xcf, 0xbc, 0x05, 0x99, 0xe1, 0x2d, 0x0a, 0x8d, 0x21, 0x7d, 0x3d, 0x63, 0x14, 0xbe, - 0x07, 0x71, 0x4e, 0x41, 0x08, 0x32, 0x1f, 0x95, 0xea, 0x3b, 0xf5, 0xcd, 0x55, 0xed, 0xce, 0x96, - 0xaa, 0xad, 0x56, 0x94, 0x29, 0x34, 0x03, 0x72, 0xb5, 0x76, 0xb7, 0x46, 0x89, 0x8a, 0x84, 0x52, - 0x90, 0x60, 0x5f, 0xb5, 0xaa, 0x12, 0x29, 0x94, 0x41, 0xe1, 0xd8, 0x7b, 0x98, 0x3a, 0x06, 0x1a, - 0xf7, 0xa3, 0x22, 0xcc, 0xb3, 0x20, 0xbd, 0x43, 0xe3, 0x37, 0xea, 0x0a, 0xb5, 0x50, 0xf4, 0x3c, - 0x17, 0x34, 0x51, 0xa7, 0xb8, 0xa9, 0x77, 0x70, 0xe1, 0xef, 0x62, 0x30, 0x37, 0x00, 0xf1, 0xdd, - 0xe2, 0xcb, 0x20, 0x13, 0xcb, 0x3e, 0xd4, 0x06, 0x97, 0xe6, 0xbc, 0x90, 0x66, 0xd9, 0x87, 0xbb, - 0x6a, 0x5d, 0x4d, 0xd0, 0xc6, 0x5d, 0xd7, 0x42, 0x0d, 0x88, 0x39, 0x5d, 0xcf, 0xcf, 0x21, 0xdf, - 0x3a, 0x43, 0x14, 0x63, 0x63, 0x14, 0xb7, 
0xba, 0x9e, 0xa8, 0x00, 0x30, 0x0c, 0xf4, 0x57, 0xd2, - 0x20, 0xeb, 0xe1, 0xd9, 0xe2, 0x3b, 0x17, 0xc2, 0xe3, 0x02, 0x10, 0x37, 0x98, 0x1f, 0xd1, 0x83, - 0xfa, 0xb4, 0x9f, 0x9f, 0x1b, 0x15, 0x10, 0xb9, 0xe4, 0xd5, 0xa6, 0x3f, 0x45, 0xd4, 0xe0, 0x97, - 0x6a, 0x03, 0x41, 0x33, 0x87, 0x30, 0xe1, 0xbd, 0x65, 0x7a, 0x68, 0x23, 0x72, 0xfb, 0x30, 0x13, - 0x9e, 0xfd, 0x09, 0xd5, 0xf2, 0xd2, 0x70, 0x59, 0xe2, 0xd5, 0x89, 0x24, 0x23, 0x52, 0xc2, 0x50, - 0x89, 0xfe, 0x7b, 0x90, 0x0c, 0xc4, 0x7e, 0x91, 0xda, 0x3e, 0xb7, 0xf1, 0x41, 0xb1, 0x6c, 0x5a, - 0x89, 0x17, 0xfe, 0x5a, 0x82, 0x19, 0x15, 0x13, 0xa7, 0xfd, 0x00, 0x9b, 0x34, 0xe6, 0x09, 0xde, - 0xa2, 0x48, 0x93, 0xbf, 0x45, 0x29, 0x41, 0x32, 0xa8, 0x66, 0x5e, 0xe4, 0x5d, 0xc7, 0xa0, 0x17, - 0xba, 0x01, 0x4a, 0xcb, 0xe9, 0xd9, 0xa6, 0xee, 0x1e, 0x69, 0x2e, 0xd6, 0x8d, 0x03, 0x6c, 0x8a, - 0x9b, 0x9b, 0x59, 0x9f, 0xae, 0x72, 0x72, 0xe1, 0xc7, 0x11, 0x40, 0x03, 0xe1, 0x84, 0x4c, 0x11, - 0x8d, 0xdc, 0xd8, 0x3a, 0x44, 0xf2, 0x1a, 0x39, 0xf1, 0xae, 0x6d, 0x24, 0x00, 0x0c, 0x16, 0xee, - 0x6f, 0xa9, 0x1b, 0xa2, 0x11, 0xf4, 0x87, 0x67, 0xd7, 0x6c, 0xa3, 0xac, 0x66, 0xcb, 0xb4, 0xf4, - 0x97, 0x5a, 0xb7, 0x15, 0xce, 0xfa, 0xbf, 0x63, 0x80, 0x2a, 0x2e, 0xd6, 0x3d, 0x4c, 0x2d, 0x0f, - 0x39, 0x2b, 0xd7, 0x2e, 0xc3, 0x34, 0x4f, 0xcc, 0x22, 0x17, 0x49, 0xcc, 0x84, 0x50, 0x78, 0x57, - 0xf4, 0x03, 0x98, 0x31, 0x9c, 0x76, 0xaf, 0x63, 0x6b, 0xec, 0xc6, 0x59, 0x04, 0xc2, 0xdf, 0x3d, - 0x4b, 0x89, 0xc7, 0x26, 0x57, 0xac, 0x38, 0x6d, 0xfa, 0xed, 0xd7, 0x0e, 0x38, 0x20, 0xe3, 0x40, - 0xcf, 0x41, 0x32, 0x38, 0x50, 0x2c, 0x04, 0x4e, 0xaa, 0x03, 0x02, 0x5a, 0x81, 0x69, 0x9d, 0x68, - 0xce, 0x1e, 0x8b, 0x51, 0xcf, 0xd3, 0x30, 0x35, 0xa6, 0x93, 0xad, 0x3d, 0xf4, 0x26, 0xa4, 0xf7, - 0xee, 0xf3, 0xb8, 0x9d, 0x1b, 0x50, 0xfe, 0x10, 0x60, 0xf6, 0xb8, 0x9f, 0x4f, 0xdd, 0xf9, 0x90, - 0x2d, 0x96, 0x9a, 0x4f, 0x35, 0xb5, 0x77, 0x3f, 0xf8, 0x40, 0x37, 0x61, 0xae, 0xa3, 0x3f, 0xd2, - 0xf6, 0x5c, 0xdd, 0x10, 0x81, 0x6a, 0x9b, 0x5b, 0x05, 0x49, 0x9d, 0xed, 0xe8, 
0x8f, 0xee, 0x08, - 0x7a, 0xdd, 0x6c, 0xe3, 0xdc, 0x7f, 0x49, 0x90, 0x10, 0x2b, 0x42, 0x5d, 0x00, 0x21, 0x1e, 0xcb, - 0xe4, 0xa1, 0x50, 0xba, 0xfc, 0xe1, 0x71, 0x3f, 0x9f, 0xac, 0x30, 0x6a, 0xbd, 0x4a, 0x9e, 0xf6, - 0xf3, 0x1f, 0x3c, 0xab, 0xd1, 0xf2, 0x41, 0xd4, 0x24, 0x1f, 0xa4, 0x6e, 0xb2, 0x82, 0xe2, 0x81, - 0x4e, 0xb4, 0x03, 0x8b, 0x78, 0xce, 0xbe, 0xab, 0x77, 0xd8, 0xe6, 0xca, 0xea, 0xcc, 0x81, 0x4e, - 0xd6, 0x7c, 0x1a, 0xca, 0xd1, 0x90, 0xe3, 0x01, 0x7f, 0x30, 0xc0, 0x8f, 0x54, 0xf0, 0x8d, 0x56, - 0xe0, 0x4a, 0xd0, 0x59, 0xa3, 0x8b, 0x6e, 0xf5, 0x8c, 0x43, 0xcc, 0x3c, 0x01, 0xb5, 0x59, 0xf3, - 0x41, 0xe3, 0x86, 0xfe, 0xa8, 0xcc, 0x9b, 0x0a, 0x57, 0x60, 0x3e, 0xb4, 0xad, 0x41, 0x80, 0xf8, - 0x97, 0x33, 0x90, 0xd8, 0xd6, 0x8f, 0xda, 0x8e, 0x6e, 0xa2, 0x25, 0x48, 0xf9, 0x17, 0xfd, 0x34, - 0xc4, 0xe6, 0x7a, 0x18, 0x26, 0x21, 0x0b, 0x32, 0x3d, 0x82, 0x5d, 0xba, 0x27, 0x1a, 0x7b, 0xba, - 0xca, 0x6d, 0x55, 0xb9, 0xfc, 0xb4, 0x9f, 0xbf, 0x3d, 0x99, 0x88, 0xb0, 0xd1, 0x73, 0x2d, 0xef, - 0xa8, 0xd8, 0xfc, 0xf0, 0xee, 0xae, 0x80, 0xa2, 0x07, 0xc9, 0x51, 0xd3, 0xbd, 0xf0, 0xa7, 0x78, - 0x36, 0x41, 0x97, 0xab, 0x75, 0x2c, 0xc3, 0x75, 0x88, 0x5f, 0x5d, 0x17, 0xd4, 0x0d, 0x46, 0x44, - 0xaf, 0xc0, 0xec, 0x9e, 0x65, 0xb3, 0x9b, 0x1d, 0x9f, 0x8f, 0x17, 0xd6, 0x33, 0x3e, 0x59, 0x30, - 0x3e, 0x80, 0x4c, 0xe8, 0xa9, 0x04, 0xdd, 0xea, 0x38, 0xdb, 0xea, 0xad, 0xe3, 0x7e, 0x3e, 0x3d, - 0x38, 0x3a, 0x7c, 0xbb, 0x2f, 0xe3, 0xa3, 0xd2, 0x83, 0x61, 0xe8, 0x66, 0x2f, 0xc0, 0x34, 0x7b, - 0xb2, 0xcc, 0x5f, 0x47, 0xa9, 0xfc, 0x03, 0xbd, 0x05, 0xd3, 0x6d, 0xac, 0x13, 0x2c, 0x1e, 0x3e, - 0x2d, 0x9d, 0x71, 0x18, 0xd9, 0xcb, 0x60, 0x95, 0xb3, 0xa3, 0x32, 0xc4, 0xf9, 0x5d, 0x1d, 0xbb, - 0x61, 0x1b, 0x2f, 0x4d, 0x9e, 0xfa, 0xc6, 0x6d, 0x6d, 0x4a, 0x15, 0x3d, 0x51, 0x0d, 0x12, 0x2e, - 0xbf, 0x9e, 0x65, 0xf7, 0x6e, 0xe7, 0x26, 0xdb, 0xa1, 0xdb, 0xdf, 0xb5, 0x29, 0xd5, 0xef, 0x8b, - 0x76, 0xfc, 0x37, 0x12, 0xdc, 0xaa, 0x8b, 0x27, 0x20, 0xc5, 0x09, 0x03, 0xb2, 0x01, 0xe0, 0x10, - 0x0a, 0x5d, 0xa0, 
0xc5, 0x6a, 0xf5, 0xec, 0xfa, 0xee, 0xec, 0x05, 0x0e, 0xdd, 0xfb, 0xd2, 0x05, - 0xf2, 0x9e, 0x68, 0x13, 0xc0, 0x08, 0x3c, 0x0d, 0xbb, 0xd8, 0x4b, 0xad, 0xbc, 0x76, 0x91, 0x68, - 0x66, 0x6d, 0x4a, 0x0d, 0x21, 0xa0, 0x0f, 0x21, 0x65, 0x0c, 0x8e, 0x4e, 0x76, 0x96, 0x01, 0xbe, - 0x7e, 0x21, 0xfb, 0xb9, 0x46, 0x6d, 0xe6, 0x80, 0x3a, 0x6c, 0x33, 0x95, 0x51, 0x9b, 0x59, 0x83, - 0xb4, 0xa8, 0x8b, 0xf0, 0xd7, 0xee, 0xd9, 0x39, 0x66, 0xb2, 0xc3, 0x5a, 0xe2, 0xbf, 0x87, 0x2f, - 0xd6, 0x6c, 0xc3, 0x31, 0xb1, 0x59, 0xa3, 0xdf, 0xaa, 0x28, 0x03, 0xb3, 0x0f, 0x82, 0x56, 0x21, - 0x63, 0xb4, 0xb1, 0x6e, 0xf7, 0xba, 0x3e, 0x0e, 0x9a, 0x10, 0x27, 0x2d, 0xfa, 0x09, 0xa0, 0x4d, - 0x40, 0x7b, 0xec, 0x5d, 0x45, 0x78, 0x56, 0xec, 0x7e, 0x70, 0x12, 0x30, 0x85, 0xf5, 0x55, 0x07, - 0x33, 0x43, 0x2f, 0x42, 0xda, 0x76, 0x6c, 0x43, 0xb7, 0x0d, 0xdc, 0x66, 0xde, 0x8d, 0x5f, 0x29, - 0x0e, 0x13, 0xd1, 0xa7, 0x90, 0x21, 0x43, 0x21, 0x7c, 0xf6, 0x0a, 0x1b, 0xf1, 0x8d, 0x8b, 0x16, - 0xfd, 0xd6, 0xa6, 0xd4, 0x11, 0x24, 0xf4, 0x6b, 0xa0, 0x78, 0x23, 0x37, 0x03, 0xec, 0x72, 0xf2, - 0xec, 0x37, 0x4c, 0xa7, 0xdc, 0x7f, 0xac, 0x4d, 0xa9, 0x63, 0x68, 0xe8, 0x33, 0x98, 0x25, 0xc3, - 0xaf, 0x74, 0xb3, 0xd7, 0xd8, 0x00, 0xdf, 0x39, 0xb3, 0xbc, 0x7d, 0xd2, 0xc3, 0xe6, 0xb5, 0x29, - 0x75, 0x14, 0x8b, 0xc2, 0xdb, 0xc3, 0x17, 0x0c, 0xd9, 0xec, 0xb9, 0xf0, 0x27, 0x5f, 0x78, 0x50, - 0xf8, 0x11, 0xac, 0x72, 0x12, 0x12, 0xe2, 0xee, 0x28, 0xb8, 0x91, 0x4d, 0x28, 0x72, 0xe1, 0x2f, - 0x64, 0x90, 0x83, 0xd0, 0x6d, 0x19, 0x50, 0xe0, 0x6c, 0x07, 0xcf, 0xe0, 0xa8, 0xd7, 0x88, 0xac, - 0x4d, 0xa9, 0x73, 0x7e, 0xdb, 0xe0, 0x25, 0xdc, 0x2b, 0x30, 0xdb, 0x71, 0x4c, 0x6b, 0xcf, 0x1a, - 0xd8, 0x6a, 0x5e, 0x29, 0xcc, 0xf8, 0x64, 0x61, 0xab, 0x6f, 0x0f, 0xbd, 0x16, 0x99, 0xe4, 0x95, - 0xf4, 0xda, 0x54, 0xe8, 0x39, 0x09, 0xf5, 0x1d, 0x6e, 0xcf, 0x66, 0xd7, 0x44, 0x22, 0x5b, 0xe4, - 0x61, 0x49, 0x5a, 0x50, 0x45, 0xc2, 0x57, 0x19, 0x31, 0xa6, 0x37, 0xce, 0x35, 0xa6, 0xfe, 0xda, - 0xd7, 0xa4, 0xc0, 0x9a, 0xde, 0x19, 0xb5, 0xa6, 0x37, 
0xcf, 0xb7, 0xa6, 0x21, 0x98, 0xc0, 0x9c, - 0xee, 0x9e, 0x68, 0x4e, 0x97, 0x27, 0xd4, 0xf5, 0x10, 0xe2, 0xb0, 0x3d, 0xad, 0x8c, 0xd8, 0xd3, - 0x1b, 0xe7, 0xda, 0xd3, 0xf0, 0x1a, 0x85, 0x41, 0xdd, 0x3a, 0xc1, 0xa0, 0xbe, 0x3e, 0x91, 0x41, - 0x0d, 0x81, 0x85, 0x2d, 0xaa, 0x7a, 0x92, 0x45, 0x2d, 0x4e, 0x66, 0x51, 0x43, 0x90, 0x43, 0x26, - 0xf5, 0xfb, 0x63, 0xe6, 0x42, 0x39, 0xff, 0xbc, 0x9d, 0x58, 0x22, 0x58, 0x93, 0xc6, 0xec, 0x85, - 0x7e, 0x82, 0xbd, 0x98, 0x63, 0xf0, 0x6f, 0x5e, 0xc0, 0x5e, 0x84, 0x06, 0x18, 0x37, 0x18, 0x1f, - 0xc3, 0x4c, 0xf8, 0x90, 0xb3, 0x57, 0x19, 0x67, 0x9b, 0xa3, 0x53, 0xfe, 0x0a, 0x80, 0xe9, 0x40, - 0xa8, 0x09, 0xfd, 0x60, 0xdc, 0x56, 0xcc, 0x9f, 0x0b, 0x7e, 0xca, 0xf5, 0xe5, 0x9a, 0x34, 0x6e, - 0x2c, 0x00, 0x64, 0xff, 0x3e, 0x3b, 0x64, 0x38, 0x0a, 0x7f, 0x20, 0x41, 0xb4, 0xe1, 0xb4, 0x50, - 0x66, 0x50, 0x6d, 0x62, 0x75, 0xa2, 0xf7, 0x07, 0xec, 0x22, 0xad, 0x79, 0xe1, 0x8c, 0x79, 0x04, - 0xd5, 0xb9, 0xa0, 0x13, 0x7a, 0x17, 0x12, 0x5d, 0x1e, 0xb2, 0x0a, 0xdb, 0x50, 0x38, 0xab, 0x3f, - 0xe7, 0x54, 0xfd, 0x2e, 0x37, 0x6f, 0x84, 0xff, 0x84, 0x67, 0xc3, 0x31, 0x31, 0xca, 0x00, 0x6c, - 0xeb, 0x84, 0x74, 0x0f, 0x5c, 0x9d, 0x60, 0x65, 0x0a, 0x25, 0x20, 0xba, 0xbe, 0xd1, 0x54, 0xa4, - 0x9b, 0x1f, 0x87, 0x6b, 0x43, 0x55, 0xb5, 0x54, 0xdf, 0xac, 0x6f, 0xae, 0x6a, 0x9b, 0xa5, 0x8d, - 0x5a, 0x53, 0x99, 0x42, 0x59, 0x58, 0xf8, 0xa8, 0x54, 0xdf, 0x11, 0xc5, 0x22, 0xad, 0xbe, 0xb9, - 0x53, 0x53, 0xef, 0x95, 0xee, 0x2a, 0x12, 0xba, 0x0a, 0x48, 0xdd, 0xaa, 0xac, 0x37, 0xab, 0x65, - 0xad, 0xb2, 0xb5, 0xb1, 0x5d, 0xaa, 0xec, 0xd4, 0xb7, 0x36, 0x95, 0x08, 0x92, 0x21, 0x56, 0xdd, - 0xda, 0xac, 0x29, 0x70, 0xf3, 0x17, 0x51, 0x88, 0x51, 0xed, 0x40, 0x2f, 0x42, 0x6a, 0x77, 0xb3, - 0xb9, 0x5d, 0xab, 0xd4, 0xef, 0xd4, 0x6b, 0x55, 0x65, 0x2a, 0x37, 0xff, 0xf8, 0xc9, 0xd2, 0x2c, - 0x6d, 0xda, 0xb5, 0x49, 0x17, 0x1b, 0xcc, 0x2c, 0xa2, 0x1c, 0xc4, 0xcb, 0xa5, 0xca, 0xfa, 0xee, - 0xb6, 0x22, 0xe5, 0x32, 0x8f, 0x9f, 0x2c, 0x01, 0x65, 0xe0, 0x26, 0x09, 0x3d, 0x07, 0x09, 
0xb5, - 0xd6, 0xdc, 0xd9, 0x52, 0x6b, 0x4a, 0x24, 0x37, 0xfb, 0xf8, 0xc9, 0x52, 0x8a, 0x36, 0x0a, 0x4b, - 0x83, 0x5e, 0x81, 0x74, 0xb3, 0xb2, 0x56, 0xdb, 0x28, 0x69, 0x95, 0xb5, 0xd2, 0xe6, 0x6a, 0x4d, - 0x89, 0xe6, 0x16, 0x1e, 0x3f, 0x59, 0x52, 0x46, 0x55, 0x93, 0x0e, 0x51, 0xdf, 0xd8, 0xde, 0x52, - 0x77, 0x94, 0xd8, 0x60, 0x08, 0x6e, 0x11, 0x50, 0x01, 0x80, 0xf7, 0xbe, 0x53, 0xab, 0x55, 0x95, - 0xe9, 0x1c, 0x7a, 0xfc, 0x64, 0x29, 0x43, 0xdb, 0x07, 0x07, 0x1d, 0xbd, 0x04, 0x33, 0x15, 0xb5, - 0x56, 0xda, 0xa9, 0x69, 0xcd, 0x9d, 0xd2, 0x4e, 0x53, 0x89, 0x0f, 0x56, 0x12, 0x3a, 0xbc, 0xa8, - 0x08, 0x73, 0xa5, 0xdd, 0x9d, 0x2d, 0x6d, 0x88, 0x37, 0x91, 0xbb, 0xf6, 0xf8, 0xc9, 0xd2, 0x3c, - 0xe5, 0x2d, 0xf5, 0x3c, 0x27, 0xcc, 0xff, 0x1a, 0x28, 0x43, 0xf3, 0xd7, 0x56, 0x2b, 0x8a, 0x9c, - 0xbb, 0xfa, 0xf8, 0xc9, 0x12, 0x1a, 0x5d, 0xc2, 0x6a, 0x05, 0xfd, 0x7f, 0xb8, 0xba, 0xf3, 0xc9, - 0x76, 0xad, 0x5a, 0x6b, 0x56, 0xb4, 0xe1, 0x65, 0x27, 0x73, 0xd9, 0xc7, 0x4f, 0x96, 0x16, 0x68, - 0x9f, 0xb1, 0xa5, 0xbf, 0x0e, 0x4a, 0x73, 0x47, 0xad, 0x95, 0x36, 0xb4, 0xfa, 0xe6, 0x6a, 0xad, - 0xc9, 0x36, 0x0b, 0x06, 0x53, 0x1a, 0x39, 0x66, 0x74, 0x09, 0x9b, 0xb5, 0x8f, 0x46, 0xf0, 0x53, - 0x03, 0xfe, 0x91, 0x93, 0x93, 0x93, 0x7f, 0xfc, 0x67, 0x8b, 0x53, 0x7f, 0xfe, 0x93, 0xc5, 0xa9, - 0xf2, 0xf5, 0x2f, 0xfe, 0x63, 0x71, 0xea, 0x8b, 0xe3, 0x45, 0xe9, 0x67, 0xc7, 0x8b, 0xd2, 0x97, - 0xc7, 0x8b, 0xd2, 0xbf, 0x1f, 0x2f, 0x4a, 0xbf, 0xf7, 0xd5, 0xe2, 0xd4, 0xcf, 0xbe, 0x5a, 0x9c, - 0xfa, 0xf2, 0xab, 0xc5, 0xa9, 0x4f, 0xe3, 0x5c, 0x6b, 0x5b, 0x71, 0x96, 0x5d, 0xbd, 0xf9, 0xbf, - 0x01, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x3a, 0xa9, 0x6b, 0x2b, 0x39, 0x00, 0x00, +func init() { proto.RegisterFile("jobs/jobspb/jobs.proto", fileDescriptor_jobs_cbbe675afce7b718) } + +var fileDescriptor_jobs_cbbe675afce7b718 = []byte{ + // 4824 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5b, 0x4b, 0x8c, 0x1b, 0x47, + 0x7a, 0x9e, 0x26, 0x39, 0x64, 0xf3, 0x9f, 
0x21, 0xd9, 0xac, 0x19, 0x49, 0x34, 0x63, 0x8b, 0xb3, + 0xf4, 0x4b, 0x92, 0x6d, 0x8e, 0x57, 0xce, 0x7a, 0x6d, 0xc5, 0x96, 0xcd, 0x97, 0x66, 0xc8, 0xd1, + 0x3c, 0xd4, 0x9c, 0x91, 0x1f, 0x1b, 0x6f, 0xa7, 0xd9, 0x5d, 0x33, 0xd3, 0x19, 0xb2, 0x9b, 0xea, + 0x6a, 0x4a, 0x9a, 0x0d, 0x90, 0x04, 0x1b, 0x04, 0x58, 0xe8, 0x94, 0x00, 0xd9, 0x5c, 0x12, 0x01, + 0x01, 0xb2, 0x0b, 0xe4, 0x10, 0x20, 0x88, 0x11, 0x24, 0x39, 0xe4, 0x96, 0x8b, 0x0f, 0x09, 0xb0, + 0x97, 0x00, 0x46, 0x0e, 0xdc, 0x64, 0x7c, 0xc9, 0x31, 0x48, 0x2e, 0x0b, 0x5d, 0x12, 0xd4, 0xa3, + 0x9b, 0x4d, 0x72, 0x1e, 0x1c, 0xc9, 0xde, 0xbd, 0x48, 0xec, 0xbf, 0xfe, 0xfa, 0xea, 0xf5, 0xd7, + 0xf7, 0xff, 0xf5, 0x57, 0x0d, 0x5c, 0xfc, 0x6d, 0xa7, 0x4d, 0x96, 0xe9, 0x3f, 0xbd, 0x36, 0xfb, + 0xaf, 0xd4, 0x73, 0x1d, 0xcf, 0x41, 0xcf, 0x19, 0x8e, 0x71, 0xe0, 0x3a, 0xba, 0xb1, 0x5f, 0x22, + 0xf7, 0x3a, 0x25, 0x56, 0xc2, 0xb5, 0xf2, 0x17, 0xb0, 0xeb, 0x3a, 0x2e, 0xd5, 0xe7, 0x3f, 0x78, + 0x8d, 0xfc, 0xe2, 0x9e, 0xb3, 0xe7, 0xb0, 0x9f, 0xcb, 0xf4, 0x97, 0x90, 0x22, 0x86, 0xd1, 0x6b, + 0x2f, 0x9b, 0xba, 0xa7, 0x0b, 0x59, 0xce, 0x97, 0x59, 0xce, 0x1b, 0xbb, 0x8e, 0xdb, 0xd5, 0x3d, + 0x1f, 0xe3, 0x45, 0x72, 0xaf, 0xb3, 0x6c, 0xe8, 0x9e, 0xde, 0x71, 0xf6, 0x96, 0x4d, 0x4c, 0x8c, + 0x5e, 0x7b, 0x99, 0x78, 0x6e, 0xdf, 0xf0, 0xfa, 0x2e, 0x36, 0x85, 0x52, 0xe1, 0x18, 0x25, 0x0f, + 0xdb, 0xba, 0xed, 0xf9, 0xf8, 0x7d, 0xcf, 0xea, 0x2c, 0xef, 0x77, 0x8c, 0x65, 0xcf, 0xea, 0x62, + 0xe2, 0xe9, 0xdd, 0x9e, 0x28, 0xf9, 0x16, 0xad, 0x4a, 0x8c, 0x7d, 0xdc, 0xd5, 0x8d, 0x7d, 0xdd, + 0xde, 0xc3, 0xee, 0x32, 0x6f, 0xc3, 0xe8, 0xb5, 0x85, 0xca, 0x4b, 0x46, 0xa7, 0x4f, 0x3c, 0xec, + 0xde, 0xc7, 0x2e, 0xb1, 0x1c, 0x7b, 0x59, 0x7c, 0x6a, 0xe2, 0x9b, 0x6b, 0x15, 0x7f, 0x0f, 0x66, + 0x6f, 0x63, 0x9d, 0x60, 0xf4, 0x29, 0x24, 0x6c, 0xc7, 0xc4, 0x9a, 0x65, 0xe6, 0xa4, 0x25, 0xe9, + 0x4a, 0xaa, 0x52, 0x3e, 0x1a, 0x14, 0xe2, 0x1b, 0x8e, 0x89, 0x1b, 0xb5, 0x27, 0x83, 0xc2, 0x5b, + 0x7b, 0x96, 0xb7, 0xdf, 0x6f, 0x97, 0x0c, 0xa7, 0xbb, 0x1c, 0xcc, 0xa8, 0xd9, 
0x1e, 0xfe, 0x5e, + 0xee, 0x1d, 0xec, 0x2d, 0x8b, 0xf9, 0x28, 0xf1, 0x6a, 0x6a, 0x9c, 0x22, 0x36, 0x4c, 0xb4, 0x08, + 0xb3, 0xb8, 0xe7, 0x18, 0xfb, 0xb9, 0xc8, 0x92, 0x74, 0x25, 0xaa, 0xf2, 0x8f, 0x1b, 0xb1, 0xff, + 0xfa, 0x8b, 0x82, 0x54, 0xfc, 0x69, 0x04, 0x2e, 0x55, 0x74, 0xe3, 0xa0, 0xdf, 0xab, 0xdb, 0x86, + 0x7b, 0xd8, 0xf3, 0x2c, 0xc7, 0xde, 0x64, 0xff, 0x12, 0xa4, 0x40, 0xf4, 0x00, 0x1f, 0xb2, 0xfe, + 0xcc, 0xab, 0xf4, 0x27, 0x7a, 0x1f, 0x62, 0x5d, 0xc7, 0xc4, 0x0c, 0x28, 0x7d, 0xfd, 0x6a, 0xe9, + 0xc4, 0xc5, 0x2d, 0x0d, 0xd1, 0xd6, 0x1d, 0x13, 0xab, 0xac, 0x1a, 0x6a, 0x83, 0x7c, 0xd0, 0x25, + 0x9a, 0x65, 0xef, 0x3a, 0xb9, 0xe8, 0x92, 0x74, 0x65, 0xee, 0xfa, 0x8d, 0x53, 0x20, 0x4e, 0xe8, + 0x56, 0x69, 0x6d, 0xbd, 0xd5, 0xb0, 0x77, 0x9d, 0xca, 0xdc, 0xd1, 0xa0, 0x90, 0x10, 0x1f, 0x6a, + 0xe2, 0xa0, 0x4b, 0xe8, 0x8f, 0xfc, 0x26, 0xf8, 0x32, 0xda, 0xff, 0xbe, 0x6b, 0xb1, 0xfe, 0x27, + 0x55, 0xfa, 0x13, 0xbd, 0x0e, 0x08, 0x73, 0x3c, 0x6c, 0x6a, 0xd4, 0x92, 0x34, 0x3a, 0xc0, 0x08, + 0x1b, 0xa0, 0x12, 0x94, 0xd4, 0x74, 0x4f, 0x5f, 0xc3, 0x87, 0x7c, 0x86, 0xc4, 0x3c, 0xfd, 0x7e, + 0x14, 0xd2, 0xc3, 0xae, 0x30, 0xf8, 0x55, 0x88, 0x33, 0x13, 0xc0, 0xac, 0x85, 0xf4, 0xf5, 0x37, + 0xa7, 0x9a, 0x0e, 0x5a, 0xb5, 0xd4, 0x62, 0xf5, 0x54, 0x51, 0x1f, 0x21, 0x88, 0x11, 0xbd, 0xe3, + 0x89, 0x8e, 0xb0, 0xdf, 0xe8, 0xcf, 0x24, 0x58, 0x1a, 0xef, 0x51, 0xe5, 0x70, 0x6d, 0xbd, 0xb5, + 0xae, 0x53, 0x3b, 0x5a, 0xc3, 0x87, 0x8d, 0x5a, 0x2e, 0xba, 0x14, 0xbd, 0x32, 0x77, 0x7d, 0x73, + 0xfa, 0x86, 0xeb, 0x67, 0x20, 0xd6, 0x6d, 0xcf, 0x3d, 0x54, 0xcf, 0x6c, 0x38, 0xdf, 0x82, 0x97, + 0xa7, 0x82, 0x0a, 0xdb, 0x50, 0x92, 0xdb, 0xd0, 0x22, 0xcc, 0xde, 0xd7, 0x3b, 0x7d, 0x2c, 0x46, + 0xcb, 0x3f, 0x6e, 0x44, 0xde, 0x91, 0x8a, 0x97, 0x20, 0xce, 0x27, 0x06, 0xa5, 0x20, 0x59, 0xae, + 0xb7, 0xae, 0x7f, 0xe7, 0xed, 0x95, 0xea, 0xba, 0x32, 0x23, 0x96, 0xe0, 0xff, 0x24, 0xb8, 0xd8, + 0xf2, 0x5c, 0xac, 0x77, 0x1b, 0xf6, 0x1e, 0x26, 0x74, 0x4c, 0x35, 0xec, 0xe9, 0x56, 0x87, 0x20, + 0x1b, 0xd2, 0x84, 
0x95, 0x68, 0xba, 0x69, 0xba, 0x98, 0x10, 0xde, 0x60, 0x65, 0xe5, 0xc9, 0xa0, + 0x50, 0x9d, 0x6a, 0xeb, 0x18, 0x46, 0x67, 0x99, 0x43, 0x58, 0xf6, 0x9e, 0x61, 0x74, 0x4a, 0xbc, + 0xa5, 0x32, 0x87, 0x53, 0x53, 0x24, 0xfc, 0x89, 0xbe, 0x0d, 0x31, 0xd2, 0xd3, 0x6d, 0x36, 0x84, + 0xb9, 0xeb, 0x97, 0x42, 0xf3, 0xef, 0x6f, 0xc1, 0x56, 0x4f, 0xb7, 0x2b, 0xb1, 0x2f, 0x06, 0x85, + 0x19, 0x95, 0xa9, 0xa2, 0x0a, 0x00, 0xf1, 0x74, 0xd7, 0xd3, 0x28, 0x97, 0x08, 0xeb, 0x7f, 0x21, + 0x54, 0x91, 0x72, 0x4d, 0x69, 0xbf, 0x63, 0x94, 0xb6, 0x7d, 0xae, 0x11, 0xd5, 0x93, 0xac, 0x1a, + 0x95, 0x16, 0x9f, 0x83, 0x4b, 0x63, 0x13, 0xb0, 0xe5, 0x3a, 0x7b, 0xb4, 0x47, 0xc5, 0x7f, 0x8a, + 0x43, 0x8a, 0x6f, 0x18, 0x7f, 0x4e, 0x46, 0x1b, 0x94, 0x9e, 0xa6, 0x41, 0x74, 0x13, 0x64, 0x6c, + 0x9b, 0x1c, 0x21, 0x32, 0x3d, 0x42, 0x02, 0xdb, 0x26, 0xab, 0xff, 0x1c, 0xdf, 0x81, 0x51, 0xb6, + 0x18, 0x89, 0xa3, 0x41, 0x21, 0xba, 0xa3, 0x36, 0xf8, 0x56, 0x7c, 0x0f, 0xf2, 0x26, 0xee, 0xb9, + 0xd8, 0xd0, 0xe9, 0x5e, 0x6c, 0xb3, 0xae, 0x6b, 0x5d, 0xdd, 0xb6, 0x76, 0x31, 0xf1, 0x72, 0x31, + 0x66, 0x1b, 0xb9, 0xa1, 0x06, 0x1f, 0xdb, 0xba, 0x28, 0x47, 0x7f, 0x20, 0xc1, 0x42, 0xdf, 0xb5, + 0x88, 0xd6, 0x3e, 0xd4, 0x3a, 0x8e, 0xa1, 0x77, 0x2c, 0xef, 0x50, 0x3b, 0xb8, 0x9f, 0x9b, 0x65, + 0x1b, 0xe2, 0xe6, 0x99, 0xac, 0x22, 0x26, 0xa9, 0xb4, 0xe3, 0x5a, 0xa4, 0x72, 0x78, 0x5b, 0x20, + 0xac, 0xdd, 0x67, 0x46, 0x5b, 0x59, 0x3c, 0x1a, 0x14, 0x94, 0x1d, 0xb5, 0x11, 0x2e, 0xba, 0xab, + 0x2a, 0xfd, 0x31, 0x65, 0xa4, 0x07, 0x74, 0x62, 0x39, 0xb6, 0xe6, 0x70, 0x7e, 0xca, 0xc5, 0xd9, + 0x44, 0x5d, 0x3f, 0x3f, 0xb3, 0xa9, 0x59, 0x3c, 0xc1, 0xc1, 0x7f, 0x2c, 0x41, 0x9e, 0xba, 0x0a, + 0x6c, 0xd0, 0x69, 0x0a, 0xfc, 0x90, 0xe6, 0x62, 0xc3, 0x71, 0xcd, 0x5c, 0x82, 0xce, 0x53, 0xa5, + 0xf5, 0xef, 0xd3, 0x7a, 0x08, 0xe6, 0xd1, 0xfa, 0x7d, 0xcb, 0x2c, 0xed, 0xec, 0x34, 0x6a, 0x47, + 0x83, 0x42, 0x6e, 0xcb, 0x07, 0x0f, 0x16, 0x51, 0x65, 0xd0, 0x6a, 0xae, 0x77, 0x42, 0x09, 0x7a, + 0x07, 0xd2, 0x86, 0xd3, 0xe9, 0x60, 0x83, 0x0d, 0x7b, 
0x47, 0x6d, 0xe4, 0x64, 0xb6, 0xc0, 0xd9, + 0xa3, 0x41, 0x21, 0x55, 0x0d, 0x4a, 0xe8, 0x52, 0xa7, 0x8c, 0xf0, 0x27, 0x52, 0x21, 0x13, 0x9a, + 0x30, 0xe6, 0x07, 0x92, 0x6c, 0xb6, 0xae, 0x4e, 0x4d, 0x61, 0x6a, 0x1a, 0x8f, 0x7c, 0xe7, 0xab, + 0x70, 0xe1, 0xd8, 0x55, 0x3c, 0x8b, 0x7a, 0x92, 0x61, 0xea, 0x51, 0x20, 0xcd, 0x17, 0x25, 0xd8, + 0x50, 0x9f, 0xa7, 0x21, 0xad, 0x62, 0xe2, 0x39, 0x2e, 0xf6, 0x77, 0xd4, 0xe7, 0x12, 0x2c, 0xd0, + 0x38, 0xc1, 0xb5, 0x7a, 0x9e, 0xe3, 0x6a, 0x2e, 0x7e, 0xe0, 0x5a, 0x1e, 0x26, 0xb9, 0x08, 0x33, + 0xba, 0xf2, 0x29, 0x43, 0x18, 0x05, 0x2a, 0xd5, 0x02, 0x10, 0x55, 0x60, 0x70, 0xbb, 0xbb, 0xf9, + 0xc3, 0x9f, 0x17, 0x6e, 0x4c, 0xb5, 0x8e, 0x93, 0xa1, 0x4b, 0xa9, 0x51, 0x53, 0x91, 0x39, 0x01, + 0x8c, 0x9e, 0x87, 0x18, 0xb5, 0x5b, 0xe6, 0x2a, 0x92, 0x15, 0xf9, 0x68, 0x50, 0x88, 0x51, 0xcb, + 0x56, 0x99, 0x74, 0x64, 0x83, 0xc7, 0x9e, 0x62, 0x83, 0xaf, 0xc0, 0x9c, 0xa7, 0xb7, 0x3b, 0x58, + 0xa3, 0x2d, 0x13, 0xb1, 0xfd, 0x5e, 0x19, 0x9b, 0x09, 0x72, 0xaf, 0xd3, 0xd6, 0x09, 0x2e, 0x6d, + 0x53, 0xcd, 0xd0, 0xd8, 0xc1, 0xf3, 0x05, 0x04, 0x2d, 0xc3, 0x9c, 0x73, 0x1f, 0xbb, 0xae, 0x65, + 0x62, 0xcd, 0x6c, 0xb3, 0x3d, 0x94, 0xac, 0xa4, 0x8f, 0x06, 0x05, 0xd8, 0x14, 0xe2, 0x5a, 0x45, + 0x05, 0x5f, 0xa5, 0xd6, 0x46, 0x1e, 0x2c, 0x0a, 0xd2, 0x08, 0xf6, 0x3f, 0xb3, 0xa7, 0x04, 0xeb, + 0xc2, 0x7b, 0xd3, 0x2f, 0x06, 0x5f, 0x77, 0xdf, 0x78, 0x58, 0x64, 0xc1, 0x07, 0x89, 0xda, 0x13, + 0x25, 0xe8, 0x35, 0xc8, 0xf6, 0x5c, 0xdc, 0xd3, 0x5d, 0xac, 0x19, 0x4e, 0xb7, 0xd7, 0xc1, 0x1e, + 0x36, 0x99, 0xf5, 0xcb, 0xaa, 0x22, 0x0a, 0xaa, 0xbe, 0x1c, 0xbd, 0x4c, 0xbd, 0x92, 0xee, 0xd1, + 0x80, 0x87, 0x60, 0x97, 0x6a, 0x26, 0x99, 0x66, 0x8a, 0x49, 0x1b, 0x42, 0x88, 0xde, 0x82, 0x0b, + 0xc3, 0x75, 0x23, 0x5a, 0xaf, 0xdf, 0xee, 0x58, 0x64, 0x1f, 0x9b, 0x39, 0x60, 0xda, 0x8b, 0xa1, + 0xc2, 0x2d, 0xbf, 0x0c, 0x1d, 0x8e, 0x98, 0xa2, 0x41, 0x27, 0x46, 0xdf, 0xc3, 0xb9, 0xb9, 0x25, + 0xe9, 0xca, 0x6c, 0x65, 0xf5, 0xc9, 0xa0, 0x50, 0x9b, 0xda, 0x8e, 0x08, 0xee, 0x2e, 0x7b, 
0x2e, + 0xc6, 0x21, 0xb3, 0xac, 0x0a, 0xbc, 0xb0, 0x45, 0xf9, 0x32, 0xa4, 0x02, 0x0c, 0xb7, 0x60, 0x6e, + 0xfe, 0xa9, 0xd9, 0x2e, 0x84, 0x82, 0xca, 0x90, 0xe0, 0xa1, 0x37, 0xc9, 0xa5, 0xd8, 0x02, 0x7e, + 0xeb, 0x24, 0x1b, 0x62, 0x5a, 0xa1, 0x55, 0xf2, 0xeb, 0xa1, 0x1a, 0x80, 0x77, 0xd8, 0xf3, 0x2d, + 0x31, 0xcd, 0x50, 0x5e, 0x3e, 0x09, 0xe5, 0xb0, 0x17, 0x36, 0xc4, 0xa4, 0x27, 0xbe, 0x09, 0x6a, + 0xc2, 0x3c, 0x8f, 0xeb, 0x05, 0x4e, 0x86, 0xe1, 0xbc, 0x7a, 0x02, 0x0e, 0x0b, 0x57, 0xf4, 0x10, + 0xd2, 0x1c, 0x09, 0x24, 0x04, 0x6d, 0x41, 0x9a, 0xc6, 0x98, 0x54, 0x53, 0xa0, 0x29, 0x0c, 0xed, + 0xea, 0x09, 0x68, 0x35, 0xa1, 0x1c, 0xc2, 0x4b, 0x99, 0x21, 0x19, 0xc9, 0xff, 0xaf, 0x04, 0xd9, + 0x09, 0xf2, 0x40, 0xdb, 0x10, 0x09, 0x8e, 0x0d, 0x94, 0xd3, 0x23, 0xec, 0xc8, 0xf0, 0x2c, 0x44, + 0x12, 0xb1, 0x4c, 0xb4, 0x07, 0x49, 0x6a, 0xce, 0xb6, 0x47, 0xcf, 0x24, 0x11, 0x06, 0xde, 0x3c, + 0x1a, 0x14, 0xe4, 0x2d, 0x26, 0x7c, 0xe6, 0x26, 0x64, 0x0e, 0xde, 0x30, 0x51, 0x01, 0xe6, 0x3c, + 0x47, 0xc3, 0x0f, 0x2d, 0xe2, 0x59, 0xf6, 0x1e, 0x0b, 0x16, 0x64, 0x15, 0x3c, 0xa7, 0x2e, 0x24, + 0xf9, 0x3f, 0x8f, 0x00, 0x9a, 0xdc, 0xa5, 0xe8, 0x1f, 0x25, 0x78, 0xde, 0x8f, 0x01, 0x1c, 0xd7, + 0xda, 0xb3, 0x6c, 0xbd, 0x33, 0x12, 0x0c, 0x48, 0x6c, 0xb6, 0x3f, 0x7d, 0x16, 0x2a, 0x10, 0x01, + 0xc2, 0xa6, 0x80, 0x1f, 0x0f, 0x14, 0x9e, 0xa7, 0x1e, 0x94, 0x07, 0x0a, 0x13, 0x2a, 0x77, 0xd5, + 0x5c, 0xff, 0x84, 0xca, 0xf9, 0x35, 0x78, 0xe1, 0x54, 0xe0, 0xf3, 0xf8, 0xae, 0xfc, 0x0f, 0x25, + 0xb8, 0x74, 0x82, 0x47, 0x09, 0xe3, 0xa4, 0x38, 0xce, 0x9d, 0x30, 0xce, 0xdc, 0xf5, 0xdf, 0x78, + 0x06, 0xaf, 0x15, 0xea, 0x44, 0x33, 0x26, 0x4b, 0x4a, 0xa4, 0xf8, 0x26, 0x64, 0x44, 0x25, 0xdf, + 0x8f, 0xa2, 0x17, 0x00, 0xf6, 0xad, 0xbd, 0x7d, 0xed, 0x81, 0xee, 0x61, 0x57, 0x9c, 0x25, 0x93, + 0x54, 0xf2, 0x11, 0x15, 0x14, 0xff, 0x4d, 0x86, 0x54, 0xa3, 0xdb, 0x73, 0x5c, 0xcf, 0xf7, 0xb2, + 0xb7, 0x21, 0xce, 0xfc, 0x02, 0x11, 0xeb, 0x57, 0x3a, 0xa5, 0x87, 0x23, 0x35, 0xb9, 0x7f, 0x11, + 0xb4, 0x20, 0x30, 0x02, 0xf7, 
0x17, 0x39, 0xd6, 0xfd, 0xbd, 0x0f, 0x71, 0x9e, 0x38, 0x10, 0x01, + 0x79, 0xe1, 0x98, 0x48, 0xbe, 0xb1, 0x79, 0xcb, 0xea, 0xe0, 0x5b, 0x4c, 0xcd, 0x07, 0xe7, 0x95, + 0xd0, 0x2b, 0x20, 0x13, 0xe2, 0x69, 0xc4, 0xfa, 0x01, 0xf7, 0x9e, 0x51, 0x7e, 0x26, 0x6d, 0xb5, + 0xb6, 0x5b, 0xd6, 0x0f, 0xb0, 0x9a, 0x20, 0xc4, 0xa3, 0x3f, 0x50, 0x1e, 0xe4, 0x07, 0x7a, 0xa7, + 0xc3, 0xbc, 0xec, 0x2c, 0x3b, 0x83, 0x07, 0xdf, 0xa3, 0xdb, 0x2c, 0xfe, 0xcd, 0x6e, 0x33, 0xe1, + 0x30, 0x7b, 0xba, 0xb7, 0xcf, 0x22, 0xc7, 0xa4, 0x0a, 0x5c, 0xb4, 0xa5, 0x7b, 0xfb, 0x28, 0x07, + 0x09, 0xa2, 0x53, 0xdf, 0x45, 0x72, 0xf2, 0x52, 0xf4, 0xca, 0xbc, 0xea, 0x7f, 0xa2, 0xcb, 0xc0, + 0x3c, 0x2f, 0xff, 0x64, 0x4e, 0x2c, 0xaa, 0x86, 0x24, 0x6c, 0x1e, 0x0e, 0xac, 0x9e, 0xb6, 0x7b, + 0x40, 0xb8, 0xd3, 0x12, 0xf3, 0x70, 0x60, 0xf5, 0x6e, 0xad, 0x11, 0x35, 0x41, 0x0b, 0x6f, 0x1d, + 0x10, 0xf4, 0x2a, 0x64, 0x2c, 0x76, 0x72, 0xd1, 0x4c, 0xcb, 0xc5, 0x86, 0xd7, 0x39, 0x64, 0x0e, + 0x4b, 0x56, 0xd3, 0x5c, 0x5c, 0x13, 0x52, 0x74, 0x15, 0x94, 0x71, 0x37, 0xcb, 0x1c, 0x8d, 0xac, + 0x66, 0xc6, 0xbc, 0x2c, 0x55, 0xe5, 0x4b, 0x1d, 0x72, 0x9c, 0x29, 0xae, 0xca, 0xe5, 0x43, 0x9f, + 0x59, 0x82, 0x85, 0x9e, 0xee, 0x12, 0xac, 0xb5, 0xfb, 0xb6, 0xd9, 0xc1, 0x1a, 0xe7, 0xea, 0x5c, + 0x9a, 0x69, 0x67, 0x59, 0x51, 0x85, 0x95, 0x70, 0x5a, 0x3f, 0x2b, 0xf6, 0xbe, 0xf8, 0x2b, 0x88, + 0xbd, 0xf3, 0x3f, 0x8d, 0xc0, 0x2c, 0xb3, 0x73, 0x74, 0x03, 0x62, 0x74, 0x99, 0xc5, 0xc9, 0x6e, + 0xda, 0x98, 0x8b, 0xd5, 0x41, 0x08, 0x62, 0xb6, 0xde, 0xc5, 0x39, 0xc4, 0x8c, 0x80, 0xfd, 0x46, + 0x97, 0x20, 0x41, 0xf0, 0x3d, 0xed, 0xbe, 0xde, 0xc9, 0x2d, 0xb0, 0x15, 0x8e, 0x13, 0x7c, 0xef, + 0xae, 0xde, 0x41, 0x17, 0x20, 0x6e, 0x11, 0xcd, 0xc6, 0x0f, 0x72, 0x8b, 0x6c, 0xa6, 0x66, 0x2d, + 0xb2, 0x81, 0x1f, 0x30, 0xda, 0xd6, 0xdd, 0x3d, 0xec, 0x69, 0x86, 0xd3, 0x21, 0xb9, 0x0b, 0x74, + 0x83, 0xd1, 0x90, 0x8e, 0x8a, 0xaa, 0x4e, 0x87, 0xa0, 0x5f, 0x83, 0xe4, 0x03, 0x9d, 0x68, 0xb8, + 0xdb, 0xf3, 0x0e, 0xd9, 0x64, 0xc9, 0xd4, 0xec, 0x49, 0x9d, 0x7e, 
0x37, 0x63, 0x72, 0x44, 0x89, + 0x36, 0x63, 0x72, 0x54, 0x89, 0x35, 0x63, 0x72, 0x4c, 0x99, 0x6d, 0xc6, 0xe4, 0x59, 0x25, 0xde, + 0x8c, 0xc9, 0x71, 0x25, 0xd1, 0x8c, 0xc9, 0x09, 0x45, 0x6e, 0xc6, 0x64, 0x59, 0x49, 0x36, 0x63, + 0x72, 0x52, 0x81, 0x66, 0x4c, 0x06, 0x65, 0xae, 0x19, 0x93, 0xe7, 0x94, 0xf9, 0x66, 0x4c, 0x9e, + 0x57, 0x52, 0xcd, 0x98, 0x9c, 0x52, 0xd2, 0xcd, 0x98, 0x9c, 0x56, 0x32, 0xcd, 0x98, 0x9c, 0x51, + 0x94, 0x66, 0x4c, 0x56, 0x94, 0x6c, 0x33, 0x26, 0x67, 0x15, 0x54, 0xfc, 0x5c, 0x02, 0xa5, 0x85, + 0xef, 0xf5, 0xb1, 0x6d, 0xe0, 0xbb, 0x7a, 0xa7, 0xba, 0xdf, 0xb7, 0x0f, 0xd0, 0x2b, 0x90, 0x31, + 0xe8, 0x0f, 0x8d, 0x1f, 0x8c, 0xe9, 0x50, 0x25, 0x36, 0xd4, 0x14, 0x13, 0xb7, 0xa8, 0x94, 0x8e, + 0xf8, 0x05, 0x00, 0xa1, 0x47, 0x77, 0x36, 0xcf, 0x9a, 0x25, 0xb9, 0x0a, 0xdd, 0xce, 0x63, 0x30, + 0xae, 0xf3, 0x80, 0xd1, 0xc7, 0x08, 0x8c, 0xea, 0x3c, 0x40, 0xcb, 0xb0, 0x68, 0xe3, 0x87, 0x9e, + 0x36, 0xae, 0xcc, 0xa8, 0x42, 0xcd, 0xd2, 0xb2, 0x6a, 0xb8, 0x42, 0xf1, 0x5f, 0x23, 0x90, 0xf1, + 0x3b, 0xed, 0xd3, 0xe1, 0x2e, 0x28, 0x74, 0x59, 0x2c, 0x53, 0xf3, 0x1c, 0x8e, 0xe4, 0x13, 0xe3, + 0xfb, 0xa7, 0x10, 0xe3, 0x18, 0x0a, 0xfd, 0x6e, 0x98, 0xdb, 0x0e, 0x6b, 0x8e, 0xbb, 0x06, 0x35, + 0x45, 0xc2, 0xb2, 0xfc, 0x0e, 0xa4, 0xfd, 0x4a, 0x5c, 0x82, 0xaa, 0x10, 0x1f, 0x69, 0xef, 0xb5, + 0x29, 0xda, 0xf3, 0xa7, 0x5a, 0x15, 0x55, 0xf3, 0xbf, 0x03, 0x68, 0xb2, 0xed, 0xb0, 0x5b, 0x9a, + 0xe5, 0x6e, 0x69, 0x73, 0xd4, 0x2d, 0xbd, 0x7b, 0xbe, 0xb1, 0x85, 0xba, 0x1d, 0x3e, 0xd5, 0xfd, + 0x73, 0x04, 0xd2, 0xdc, 0x45, 0x04, 0xee, 0xe8, 0x35, 0xc8, 0x32, 0xd2, 0xb2, 0xec, 0x3d, 0xad, + 0x27, 0x84, 0x6c, 0x7c, 0x11, 0x55, 0xf1, 0x0b, 0x02, 0xe5, 0x17, 0x21, 0xe5, 0x62, 0xdd, 0x1c, + 0x2a, 0x46, 0x98, 0xe2, 0x3c, 0x15, 0x06, 0x4a, 0x2f, 0x43, 0x9a, 0x79, 0xc3, 0xa1, 0x56, 0x94, + 0x69, 0xa5, 0x98, 0x34, 0x50, 0xab, 0x40, 0x8a, 0xf4, 0x74, 0x7b, 0xa8, 0x15, 0x63, 0x93, 0x7a, + 0x46, 0xee, 0x68, 0x9e, 0xd6, 0x09, 0xfb, 0x52, 0x17, 0x93, 0x7e, 0x17, 0x6b, 0x3d, 0x87, 0x1f, + 0xb6, 
0xa2, 0x6a, 0x92, 0x4b, 0xb6, 0x1c, 0x82, 0x76, 0x98, 0xa9, 0xb0, 0xb9, 0xd0, 0x4c, 0x3e, + 0x39, 0xb9, 0x38, 0x6b, 0xe5, 0xda, 0xf4, 0xd3, 0xa9, 0x66, 0xc8, 0xa8, 0xa0, 0xf8, 0xb7, 0x12, + 0x5c, 0xa2, 0x01, 0x33, 0x67, 0xc5, 0x2a, 0xcb, 0x77, 0xfb, 0xd6, 0xa9, 0x43, 0x82, 0x05, 0xdd, + 0x41, 0xfc, 0xb9, 0x7a, 0x34, 0x28, 0xc4, 0xa9, 0xf6, 0x33, 0x7b, 0xae, 0x38, 0x05, 0x6e, 0xb0, + 0xe3, 0x91, 0xe7, 0xea, 0x36, 0xb1, 0xe8, 0x41, 0x81, 0x2e, 0x5b, 0x17, 0x77, 0xdb, 0xd8, 0xe5, + 0x8b, 0x31, 0xaf, 0x2e, 0x8e, 0x14, 0xae, 0xf3, 0xb2, 0x62, 0x1e, 0x72, 0xe3, 0x5d, 0x0e, 0x4e, + 0xf6, 0xbf, 0x09, 0x17, 0x37, 0xf0, 0x83, 0xe3, 0x46, 0x53, 0x81, 0x04, 0xe7, 0x2f, 0xdf, 0xe4, + 0xaf, 0x8c, 0xb3, 0x6a, 0x38, 0xe5, 0x5f, 0x62, 0x3d, 0xdd, 0x66, 0x15, 0x54, 0xbf, 0x62, 0xf1, + 0x53, 0xb8, 0x34, 0x86, 0x1e, 0x2c, 0xdf, 0x07, 0x10, 0xa7, 0x27, 0x3f, 0x11, 0xd9, 0xa4, 0x27, + 0x4f, 0x15, 0x93, 0xe8, 0x2d, 0xaa, 0xaf, 0x8a, 0x6a, 0x45, 0x95, 0xa5, 0x24, 0xfa, 0x5d, 0x4c, + 0x2d, 0xe4, 0xb6, 0x45, 0x3c, 0xf4, 0x21, 0xcc, 0x0b, 0x8b, 0xa0, 0x86, 0xe2, 0x77, 0xfb, 0x0c, + 0xa3, 0x9a, 0x73, 0x03, 0x10, 0x52, 0xfc, 0x3b, 0x09, 0x16, 0x6a, 0xae, 0xd3, 0xeb, 0x61, 0x53, + 0xf8, 0x0a, 0x3e, 0x17, 0xbe, 0x8b, 0x90, 0x42, 0x2e, 0x62, 0x03, 0x22, 0x8d, 0x9a, 0x38, 0x0b, + 0xdc, 0x7c, 0xd6, 0x23, 0x46, 0xa3, 0x86, 0xde, 0xe5, 0x13, 0xd2, 0x27, 0x8c, 0x3f, 0xd3, 0x13, + 0x87, 0xbe, 0x11, 0x33, 0x65, 0x8a, 0xaa, 0xa8, 0x50, 0xfc, 0x49, 0x02, 0x2e, 0x84, 0x27, 0x79, + 0xa5, 0xea, 0x77, 0xfc, 0x33, 0x48, 0x58, 0xb6, 0x89, 0x1f, 0xe2, 0xa9, 0x78, 0xf2, 0x38, 0x88, + 0x92, 0x98, 0x8f, 0x06, 0x85, 0xf1, 0x8f, 0x99, 0x02, 0x13, 0x7d, 0x1c, 0x84, 0xa7, 0x3c, 0xed, + 0x73, 0xe3, 0xa9, 0xd1, 0x6b, 0x63, 0xa1, 0xea, 0x48, 0x24, 0xc8, 0x1c, 0xca, 0x37, 0x14, 0x09, + 0xb6, 0x20, 0x6b, 0xd9, 0x1e, 0x76, 0x3b, 0x58, 0xbf, 0x4f, 0x03, 0x1b, 0xda, 0xbc, 0xc8, 0xfe, + 0x4c, 0x1b, 0x46, 0x28, 0x21, 0x00, 0x1e, 0x8e, 0x7c, 0x06, 0x0b, 0x61, 0x50, 0x7f, 0x09, 0x4e, + 0xcf, 0x08, 0xb1, 0x19, 0x1e, 0xc2, 0xfa, 
0x89, 0x97, 0x10, 0x50, 0x43, 0x4c, 0xfb, 0x5d, 0x88, + 0xf3, 0x83, 0xbe, 0x48, 0xaf, 0xde, 0x7c, 0xda, 0x69, 0xe7, 0x09, 0x04, 0x55, 0xa0, 0xe5, 0xff, + 0x54, 0x82, 0xf9, 0xf0, 0x72, 0x23, 0x0b, 0x64, 0xd6, 0x77, 0x9f, 0xd2, 0xa2, 0x95, 0x0d, 0x1a, + 0xcb, 0xb2, 0x42, 0xb6, 0x06, 0x1f, 0x3c, 0xf5, 0x1a, 0x70, 0x08, 0x61, 0x4a, 0x0d, 0x93, 0x06, + 0x48, 0xa6, 0xeb, 0xf4, 0x86, 0xe9, 0xf5, 0xa8, 0x2a, 0x53, 0x01, 0x8d, 0xf9, 0xf2, 0xbf, 0x0b, + 0xc9, 0xc0, 0x50, 0x42, 0x27, 0xfc, 0xe8, 0xd7, 0x78, 0xc2, 0x3f, 0xb5, 0xfd, 0x1a, 0xa4, 0x46, + 0x66, 0x0c, 0x5d, 0x0c, 0xfa, 0x10, 0xab, 0xc4, 0x79, 0x1f, 0xce, 0x44, 0x29, 0xfe, 0x3c, 0x0e, + 0x0b, 0xc7, 0x31, 0xed, 0x27, 0xa0, 0x84, 0x78, 0x4b, 0xeb, 0x58, 0xc4, 0x13, 0xfb, 0xe9, 0xea, + 0xe9, 0x07, 0xd2, 0x10, 0xf9, 0x09, 0x6b, 0x49, 0xbb, 0xa3, 0x94, 0xf8, 0x3d, 0x48, 0x9b, 0xbc, + 0xe3, 0x9a, 0xd8, 0xa8, 0xd1, 0x33, 0xcf, 0x91, 0xc7, 0x10, 0xa0, 0x40, 0x4f, 0x99, 0xa1, 0x22, + 0xc2, 0xee, 0x1d, 0x7c, 0xf4, 0x20, 0xb7, 0x63, 0x99, 0x6c, 0xf7, 0xa4, 0x2a, 0xad, 0xa3, 0x41, + 0x21, 0x2b, 0xb0, 0xfc, 0x64, 0xce, 0x33, 0xaf, 0x54, 0xd6, 0x1c, 0x03, 0x34, 0xa9, 0xd7, 0xa5, + 0xe5, 0xb4, 0xe1, 0xd9, 0xa1, 0xd7, 0xa5, 0xfb, 0xe8, 0xd9, 0xbd, 0x2e, 0xfd, 0xd9, 0x30, 0xd1, + 0x1f, 0x4a, 0x90, 0xe5, 0x99, 0xdd, 0x6e, 0xdf, 0xd3, 0x79, 0xba, 0xde, 0x3f, 0x9f, 0x7e, 0x72, + 0x34, 0x28, 0x64, 0xd8, 0x84, 0xac, 0x8b, 0x32, 0xd6, 0x6c, 0xe5, 0x69, 0x9b, 0x1d, 0xa2, 0x88, + 0x33, 0x5b, 0x20, 0x30, 0xd1, 0x1a, 0xa4, 0xf9, 0x61, 0xdb, 0xbf, 0x38, 0x67, 0x07, 0xd7, 0x54, + 0xe5, 0xa5, 0x27, 0x83, 0xc2, 0xd2, 0x31, 0x96, 0xc5, 0xcf, 0xe9, 0x77, 0xb9, 0xae, 0x9a, 0xda, + 0x0d, 0x7f, 0x22, 0x03, 0x52, 0x81, 0x69, 0x1c, 0xf6, 0xc4, 0x39, 0xf7, 0xd9, 0x5d, 0xd9, 0xbc, + 0x6f, 0x23, 0x14, 0x13, 0xed, 0x41, 0xc6, 0x6f, 0x84, 0x3b, 0x74, 0x92, 0x4b, 0x7e, 0x2d, 0xcd, + 0xf8, 0x66, 0xcd, 0x47, 0x4d, 0x44, 0xca, 0xe5, 0x22, 0x2c, 0x1e, 0x1b, 0xe5, 0x7c, 0x19, 0x87, + 0x8b, 0xa3, 0x44, 0x18, 0xc4, 0x21, 0xda, 0xb8, 0x87, 0xfc, 0x60, 0x6a, 0x32, 
0xf5, 0x31, 0x38, + 0x99, 0xf9, 0x5f, 0xe3, 0x3e, 0xf2, 0xb3, 0x31, 0x1f, 0xf9, 0x14, 0xf8, 0xcc, 0xbc, 0xc6, 0xf0, + 0x7d, 0x47, 0xf9, 0x71, 0xe0, 0x0b, 0x78, 0xd6, 0xe6, 0xc3, 0xa7, 0x80, 0x67, 0xf5, 0xfd, 0xcf, + 0xc0, 0x1b, 0xfc, 0x8b, 0x04, 0xa9, 0x91, 0x91, 0xfd, 0x32, 0xdd, 0xc1, 0x56, 0x10, 0x0d, 0xf1, + 0xe7, 0x15, 0xef, 0x9c, 0x7f, 0x58, 0xa3, 0x41, 0x52, 0xfe, 0x1f, 0x24, 0x48, 0x8d, 0x4c, 0xe4, + 0x37, 0xe4, 0x48, 0xbe, 0xfe, 0x9e, 0xb7, 0x21, 0x3d, 0xba, 0x44, 0xa1, 0x36, 0xa4, 0xaf, 0xa7, + 0x8d, 0xe2, 0x77, 0x21, 0xce, 0x25, 0x08, 0x41, 0xfa, 0xa3, 0x72, 0x63, 0xbb, 0xb1, 0xb1, 0xa2, + 0xdd, 0xda, 0x54, 0xb5, 0x95, 0xaa, 0x32, 0x83, 0xe6, 0x41, 0xae, 0xd5, 0x6f, 0xd7, 0xa9, 0x50, + 0x91, 0xd0, 0x1c, 0x24, 0xd8, 0x57, 0xbd, 0xa6, 0x44, 0x8a, 0x15, 0x50, 0x38, 0xf6, 0x2e, 0xa6, + 0x8e, 0x81, 0xc6, 0xfd, 0xa8, 0x04, 0x0b, 0x2c, 0x48, 0xef, 0xd2, 0xf8, 0x8d, 0xba, 0x42, 0x2d, + 0x14, 0x3d, 0x67, 0x83, 0x22, 0xea, 0x14, 0x37, 0xf4, 0x2e, 0x2e, 0xfe, 0x7d, 0x0c, 0xb2, 0x43, + 0x10, 0xdf, 0x2d, 0xbe, 0x02, 0x32, 0xb1, 0xec, 0x03, 0x6d, 0x78, 0x69, 0xce, 0x13, 0x69, 0x96, + 0x7d, 0xb0, 0xa3, 0x36, 0xd4, 0x04, 0x2d, 0xdc, 0x71, 0x2d, 0xd4, 0x84, 0x98, 0xd3, 0xf3, 0xfc, + 0x33, 0xe4, 0xdb, 0xa7, 0x4c, 0xc5, 0x44, 0x1b, 0xa5, 0xcd, 0x9e, 0x27, 0x32, 0x00, 0x0c, 0x03, + 0xfd, 0xb5, 0x34, 0x3c, 0xf5, 0xf0, 0xd3, 0xe2, 0xbb, 0xe7, 0xc2, 0xe3, 0x13, 0x20, 0x6e, 0x30, + 0x3f, 0xa2, 0x1b, 0xf5, 0xc9, 0xa0, 0x90, 0x1d, 0x9f, 0x20, 0xf2, 0x8c, 0x57, 0x9b, 0x7e, 0x17, + 0x51, 0x93, 0x5f, 0xaa, 0x0d, 0x27, 0x9a, 0x39, 0x84, 0x29, 0xef, 0x2d, 0x53, 0x23, 0x0b, 0x91, + 0xdf, 0x83, 0xf9, 0x70, 0xef, 0x8f, 0xc9, 0x96, 0x97, 0x47, 0xd3, 0x12, 0xaf, 0x4d, 0x35, 0x33, + 0xe2, 0x48, 0x18, 0x4a, 0xd1, 0x7f, 0x17, 0x92, 0xc1, 0xb4, 0x9f, 0x27, 0xb7, 0xcf, 0x39, 0x3e, + 0x48, 0x96, 0xcd, 0x2a, 0xf1, 0xe2, 0xdf, 0x48, 0x30, 0xaf, 0x62, 0xe2, 0x74, 0xee, 0x63, 0x93, + 0xc6, 0x3c, 0xc1, 0x5b, 0x14, 0x69, 0xfa, 0xb7, 0x28, 0x65, 0x48, 0x06, 0xd9, 0xcc, 0xf3, 0xbc, + 0xeb, 0x18, 0xd6, 
0x42, 0x57, 0x41, 0x69, 0x3b, 0x7d, 0xdb, 0xd4, 0xdd, 0x43, 0xcd, 0xc5, 0xba, + 0xb1, 0x8f, 0x4d, 0x71, 0x73, 0x93, 0xf1, 0xe5, 0x2a, 0x17, 0x17, 0x7f, 0x14, 0x01, 0x34, 0x9c, + 0x9c, 0x10, 0x15, 0xd1, 0xc8, 0x8d, 0x8d, 0x43, 0x1c, 0x5e, 0x23, 0xc7, 0xde, 0xb5, 0x8d, 0x05, + 0x80, 0xc1, 0xc0, 0xfd, 0x25, 0x75, 0x43, 0x32, 0x82, 0x7e, 0x7c, 0x7a, 0xce, 0x36, 0xca, 0x72, + 0xb6, 0xcc, 0x4a, 0x7f, 0xa9, 0x79, 0x5b, 0xe1, 0xac, 0xff, 0x27, 0x06, 0xa8, 0xea, 0x62, 0xdd, + 0xc3, 0x94, 0x79, 0xc8, 0x69, 0x67, 0xed, 0x0a, 0xcc, 0xf2, 0x83, 0x59, 0xe4, 0x3c, 0x07, 0x33, + 0x31, 0x29, 0xbc, 0x2a, 0xfa, 0x3e, 0xcc, 0x1b, 0x4e, 0xa7, 0xdf, 0xb5, 0x35, 0x76, 0xe3, 0x2c, + 0x02, 0xe1, 0xef, 0x9c, 0x66, 0xc4, 0x13, 0x9d, 0x2b, 0x55, 0x9d, 0x0e, 0xfd, 0xf6, 0x73, 0x07, + 0x1c, 0x90, 0x69, 0xa0, 0xe7, 0x21, 0x19, 0x6c, 0x28, 0x16, 0x02, 0x27, 0xd5, 0xa1, 0x00, 0x5d, + 0x87, 0x59, 0x9d, 0x68, 0xce, 0x2e, 0x8b, 0x51, 0xcf, 0xb2, 0x30, 0x35, 0xa6, 0x93, 0xcd, 0x5d, + 0xf4, 0x16, 0xa4, 0x76, 0xef, 0xf1, 0xb8, 0x9d, 0x13, 0x28, 0x7f, 0x08, 0x90, 0x39, 0x1a, 0x14, + 0xe6, 0x6e, 0xdd, 0x61, 0x83, 0xa5, 0xf4, 0xa9, 0xce, 0xed, 0xde, 0x0b, 0x3e, 0xd0, 0x35, 0xc8, + 0x76, 0xf5, 0x87, 0xda, 0xae, 0xab, 0x1b, 0x22, 0x50, 0xed, 0x70, 0x56, 0x90, 0xd4, 0x4c, 0x57, + 0x7f, 0x78, 0x4b, 0xc8, 0x1b, 0x66, 0x07, 0xe7, 0xff, 0x5b, 0x82, 0x84, 0x18, 0x11, 0xea, 0x01, + 0x88, 0xe9, 0xb1, 0x4c, 0x1e, 0x0a, 0xa5, 0x2a, 0x77, 0x8e, 0x06, 0x85, 0x64, 0x95, 0x49, 0x1b, + 0x35, 0xf2, 0x64, 0x50, 0xf8, 0xf0, 0x69, 0x49, 0xcb, 0x07, 0x51, 0x93, 0xbc, 0x91, 0x86, 0xc9, + 0x12, 0x8a, 0xfb, 0x3a, 0xd1, 0xf6, 0x2d, 0xe2, 0x39, 0x7b, 0xae, 0xde, 0x65, 0x8b, 0x2b, 0xab, + 0xf3, 0xfb, 0x3a, 0x59, 0xf5, 0x65, 0x28, 0x4f, 0x43, 0x8e, 0xfb, 0xfc, 0xc1, 0x00, 0xdf, 0x52, + 0xc1, 0x37, 0xba, 0x0e, 0x17, 0x82, 0xca, 0x1a, 0x1d, 0x74, 0xbb, 0x6f, 0x1c, 0x60, 0xe6, 0x09, + 0x28, 0x67, 0x2d, 0x04, 0x85, 0xeb, 0xfa, 0xc3, 0x0a, 0x2f, 0x2a, 0x5e, 0x80, 0x85, 0xd0, 0xb2, + 0x06, 0x01, 0x22, 0x06, 0x65, 0xdd, 0xda, 0x73, 0xf5, 
0xf0, 0x3b, 0xba, 0x3b, 0x90, 0x19, 0x7b, + 0xa7, 0x2a, 0x68, 0x25, 0x9c, 0x08, 0x1b, 0x7d, 0xd8, 0x5a, 0xaa, 0xf2, 0x4f, 0x3f, 0xe4, 0x4e, + 0x1b, 0x23, 0xdf, 0xc5, 0x05, 0xc8, 0x06, 0xcd, 0x04, 0x6d, 0xff, 0x62, 0x1e, 0x12, 0x5b, 0xfa, + 0x61, 0xc7, 0xd1, 0x4d, 0xb4, 0x04, 0x73, 0xfe, 0x23, 0x03, 0xbf, 0xbd, 0xa4, 0x1a, 0x16, 0x21, + 0x0b, 0xd2, 0x7d, 0x82, 0x5d, 0x6a, 0x0f, 0x1a, 0x7b, 0x36, 0xcb, 0x79, 0xb2, 0x52, 0x79, 0x32, + 0x28, 0xdc, 0x9c, 0x6e, 0x79, 0xb0, 0xd1, 0x77, 0x2d, 0xef, 0xb0, 0xd4, 0xba, 0x73, 0x7b, 0x47, + 0x40, 0xd1, 0x4d, 0xec, 0xa8, 0xa9, 0x7e, 0xf8, 0x53, 0x3c, 0xd9, 0xa0, 0x53, 0xad, 0x75, 0x2d, + 0xc3, 0x75, 0x88, 0x9f, 0xd9, 0x17, 0xd2, 0x75, 0x26, 0x44, 0xaf, 0x42, 0x66, 0xd7, 0xb2, 0xd9, + 0xad, 0x92, 0xaf, 0xc7, 0x93, 0xfa, 0x69, 0x5f, 0x2c, 0x14, 0xef, 0x43, 0x3a, 0xf4, 0x4c, 0x83, + 0x9a, 0x59, 0x9c, 0x99, 0xd9, 0xe6, 0xd1, 0xa0, 0x90, 0x1a, 0x6e, 0x5b, 0x6e, 0x6a, 0xcf, 0xe2, + 0x1f, 0x53, 0xc3, 0x66, 0xa8, 0xa1, 0x2d, 0xc2, 0x2c, 0x7b, 0x54, 0xcd, 0x5f, 0x66, 0xa9, 0xfc, + 0x03, 0xbd, 0x0d, 0xb3, 0x1d, 0xac, 0x13, 0x2c, 0x1e, 0x5d, 0x2d, 0x9d, 0x42, 0x04, 0xec, 0x55, + 0xb2, 0xca, 0xd5, 0x51, 0x05, 0xe2, 0xfc, 0x9e, 0x90, 0xdd, 0xee, 0x4d, 0xa6, 0x45, 0x4f, 0x7c, + 0x5f, 0xb7, 0x3a, 0xa3, 0x8a, 0x9a, 0xa8, 0x0e, 0x09, 0x97, 0x5f, 0x0d, 0xb3, 0x3b, 0xbf, 0x33, + 0x0f, 0xfa, 0xa1, 0x9b, 0xe7, 0xd5, 0x19, 0xd5, 0xaf, 0x8b, 0xb6, 0xfd, 0xf7, 0x19, 0xdc, 0xa3, + 0x88, 0xe7, 0x27, 0xa5, 0x29, 0x83, 0xc1, 0x21, 0xe0, 0x08, 0x0a, 0x1d, 0xa0, 0xc5, 0xee, 0x09, + 0xd8, 0xd5, 0xe1, 0xe9, 0x03, 0x1c, 0xb9, 0x73, 0xa6, 0x03, 0xe4, 0x35, 0xd1, 0x06, 0x80, 0x11, + 0x78, 0x39, 0x76, 0xa9, 0x38, 0x77, 0xfd, 0xf5, 0xf3, 0x44, 0x52, 0xab, 0x33, 0x6a, 0x08, 0x01, + 0xdd, 0x81, 0x39, 0x63, 0xb8, 0x6d, 0x73, 0x19, 0x06, 0xf8, 0xc6, 0xb9, 0xb8, 0x7b, 0x95, 0xf2, + 0xf5, 0x50, 0x3a, 0xca, 0xd7, 0xca, 0x38, 0x5f, 0xd7, 0x21, 0x25, 0x72, 0x32, 0xfc, 0x3d, 0x7e, + 0x2e, 0xcb, 0xdc, 0x45, 0xd8, 0x4a, 0xfc, 0x17, 0xfb, 0xa5, 0xba, 0x6d, 0x38, 0x26, 0x36, 
0xeb, + 0xf4, 0x5b, 0x15, 0x29, 0x68, 0xf6, 0x41, 0xd0, 0x0a, 0xa4, 0x8d, 0x0e, 0xd6, 0xed, 0x7e, 0xcf, + 0xc7, 0x41, 0x53, 0xe2, 0xa4, 0x44, 0x3d, 0x01, 0xb4, 0x01, 0x68, 0x97, 0xbd, 0xe9, 0x08, 0xf7, + 0x8a, 0xdd, 0x4d, 0x4e, 0x03, 0xa6, 0xb0, 0xba, 0xea, 0xb0, 0x67, 0xe8, 0x25, 0x48, 0xd9, 0x8e, + 0x6d, 0xe8, 0xb6, 0x81, 0x3b, 0xcc, 0xb3, 0xf2, 0xeb, 0xcc, 0x51, 0x21, 0xfa, 0x14, 0xd2, 0x64, + 0xe4, 0xf8, 0x90, 0xbb, 0xc0, 0x5a, 0x7c, 0xf3, 0xbc, 0x09, 0xc7, 0xd5, 0x19, 0x75, 0x0c, 0x09, + 0xfd, 0x16, 0x28, 0xde, 0xd8, 0xad, 0x04, 0xbb, 0x18, 0x3d, 0xfd, 0xfd, 0xd4, 0x09, 0x77, 0x2f, + 0xab, 0x33, 0xea, 0x04, 0x1a, 0xfa, 0x0c, 0x32, 0x64, 0xf4, 0x85, 0x70, 0xee, 0x12, 0x6b, 0xe0, + 0xdb, 0xa7, 0xa6, 0xd6, 0x8f, 0x7b, 0x54, 0xbd, 0x3a, 0xa3, 0x8e, 0x63, 0x51, 0x78, 0x7b, 0xf4, + 0x72, 0x23, 0x97, 0x3b, 0x13, 0xfe, 0xf8, 0xcb, 0x16, 0x0a, 0x3f, 0x86, 0x85, 0xd6, 0x20, 0xd9, + 0xf5, 0x7d, 0x45, 0xee, 0xb9, 0x33, 0x23, 0xee, 0x71, 0xf7, 0xb5, 0x3a, 0xa3, 0x0e, 0xeb, 0x57, + 0x92, 0x90, 0x10, 0x97, 0x60, 0xc1, 0xd5, 0x72, 0x42, 0x91, 0x8b, 0xbf, 0x90, 0x41, 0x0e, 0x62, + 0xd0, 0x65, 0x40, 0x41, 0xd4, 0x30, 0x7c, 0xcf, 0x47, 0x5d, 0x50, 0x64, 0x75, 0x46, 0xcd, 0xfa, + 0x65, 0xc3, 0x27, 0x7d, 0xaf, 0x42, 0xa6, 0xeb, 0x98, 0xd6, 0xae, 0x35, 0x24, 0x7e, 0x9e, 0xf2, + 0x4c, 0xfb, 0x62, 0x41, 0xfc, 0x37, 0x47, 0x9e, 0xbd, 0x4c, 0xf3, 0xdc, 0x9b, 0xf6, 0x3e, 0x78, + 0x17, 0x43, 0x1d, 0x91, 0xdb, 0xb7, 0xd9, 0x7d, 0x97, 0x38, 0xf6, 0xf2, 0xf8, 0x2a, 0x25, 0xa4, + 0xe2, 0xe4, 0x5a, 0x1d, 0x63, 0xe6, 0xab, 0x67, 0x32, 0xb3, 0x3f, 0xf6, 0x55, 0x29, 0xa0, 0xe6, + 0x5b, 0xe3, 0xd4, 0x7c, 0xed, 0x6c, 0x6a, 0x0e, 0xc1, 0x04, 0xdc, 0xbc, 0x73, 0x2c, 0x37, 0x2f, + 0x4f, 0xb9, 0x71, 0x42, 0x88, 0xa3, 0xe4, 0x5c, 0x1d, 0x23, 0xe7, 0xab, 0x67, 0x92, 0x73, 0x78, + 0x8c, 0x82, 0x9d, 0x37, 0x8f, 0x61, 0xe7, 0x37, 0xa6, 0x62, 0xe7, 0x10, 0x58, 0x98, 0x9e, 0xd5, + 0xe3, 0xe8, 0xb9, 0x34, 0x1d, 0x3d, 0x87, 0x20, 0x47, 0xf8, 0xf9, 0x7b, 0x13, 0xdc, 0xa3, 0x9c, + 0xbd, 0x79, 0x8f, 0xcd, 0x75, 
0xac, 0x4a, 0x13, 0xe4, 0xa3, 0x1f, 0x43, 0x3e, 0x59, 0x06, 0xff, + 0xd6, 0x39, 0xc8, 0x27, 0xd4, 0xc0, 0x24, 0xfb, 0x7c, 0x0c, 0xf3, 0x61, 0xc6, 0x60, 0xcf, 0x4b, + 0x4e, 0xe7, 0xb6, 0x13, 0xfe, 0x9c, 0x81, 0xd9, 0x40, 0xa8, 0x08, 0x7d, 0x7f, 0x92, 0x78, 0x16, + 0xce, 0x04, 0x3f, 0xe1, 0x1e, 0x76, 0x55, 0x9a, 0x64, 0x9e, 0xdb, 0x61, 0xe6, 0x59, 0x3c, 0xd3, + 0x77, 0x4f, 0x44, 0xb4, 0xab, 0x52, 0x98, 0x7a, 0x00, 0x64, 0xff, 0x9a, 0x3f, 0x44, 0x43, 0xc5, + 0x3f, 0x91, 0x20, 0xda, 0x74, 0xda, 0x28, 0x3d, 0x4c, 0xc2, 0xb1, 0xf4, 0xd9, 0x07, 0x43, 0x75, + 0x71, 0xda, 0x7b, 0xf1, 0x94, 0xb6, 0x83, 0xa4, 0x65, 0x50, 0x09, 0xbd, 0x07, 0x89, 0x1e, 0x8f, + 0xa6, 0x05, 0xd3, 0x14, 0x4f, 0xab, 0xcf, 0x35, 0x55, 0xbf, 0xca, 0xb5, 0xab, 0xe1, 0xbf, 0x6c, + 0x5a, 0x77, 0x4c, 0x8c, 0xd2, 0x00, 0x5b, 0x3a, 0x21, 0xbd, 0x7d, 0x57, 0x27, 0x58, 0x99, 0x41, + 0x09, 0x88, 0xae, 0xad, 0xb7, 0x14, 0xe9, 0xda, 0xc7, 0xe1, 0x94, 0x59, 0x4d, 0x2d, 0x37, 0x36, + 0x1a, 0x1b, 0x2b, 0xda, 0x46, 0x79, 0xbd, 0xde, 0x52, 0x66, 0x50, 0x0e, 0x16, 0x3f, 0x2a, 0x37, + 0xb6, 0x45, 0x0e, 0x4d, 0x6b, 0x6c, 0x6c, 0xd7, 0xd5, 0xbb, 0xe5, 0xdb, 0x8a, 0x84, 0x2e, 0x02, + 0x52, 0x37, 0xab, 0x6b, 0xad, 0x5a, 0x45, 0xab, 0x6e, 0xae, 0x6f, 0x95, 0xab, 0xdb, 0x8d, 0xcd, + 0x0d, 0x25, 0x82, 0x64, 0x88, 0xd5, 0x36, 0x37, 0xea, 0x0a, 0x5c, 0xfb, 0x71, 0x0c, 0x62, 0xd4, + 0xd6, 0xd0, 0x4b, 0x30, 0xb7, 0xb3, 0xd1, 0xda, 0xaa, 0x57, 0x1b, 0xb7, 0x1a, 0xf5, 0x9a, 0x32, + 0x93, 0x5f, 0x78, 0xf4, 0x78, 0x29, 0x43, 0x8b, 0x76, 0x6c, 0xd2, 0xc3, 0x06, 0x23, 0x59, 0x94, + 0x87, 0x78, 0xa5, 0x5c, 0x5d, 0xdb, 0xd9, 0x52, 0xa4, 0x7c, 0xfa, 0xd1, 0xe3, 0x25, 0xa0, 0x0a, + 0x9c, 0xe0, 0xd0, 0xf3, 0x90, 0x50, 0xeb, 0xad, 0xed, 0x4d, 0xb5, 0xae, 0x44, 0xf2, 0x99, 0x47, + 0x8f, 0x97, 0xe6, 0x68, 0xa1, 0xe0, 0x2d, 0xf4, 0x2a, 0xa4, 0x5a, 0xd5, 0xd5, 0xfa, 0x7a, 0x59, + 0xab, 0xae, 0x96, 0x37, 0x56, 0xea, 0x4a, 0x34, 0xbf, 0xf8, 0xe8, 0xf1, 0x92, 0x32, 0x6e, 0xe8, + 0xb4, 0x89, 0xc6, 0xfa, 0xd6, 0xa6, 0xba, 0xad, 0xc4, 0x86, 0x4d, 
0x70, 0x7e, 0x41, 0x45, 0x00, + 0x5e, 0xfb, 0x56, 0xbd, 0x5e, 0x53, 0x66, 0xf3, 0xe8, 0xd1, 0xe3, 0xa5, 0x34, 0x2d, 0x1f, 0xd2, + 0x06, 0x7a, 0x19, 0xe6, 0xab, 0x6a, 0xbd, 0xbc, 0x5d, 0xd7, 0x5a, 0xdb, 0xe5, 0xed, 0x96, 0x12, + 0x1f, 0x8e, 0x24, 0x44, 0x05, 0xa8, 0x04, 0xd9, 0xf2, 0xce, 0xf6, 0xa6, 0x36, 0xa2, 0x9b, 0xc8, + 0x5f, 0x7a, 0xf4, 0x78, 0x69, 0x81, 0xea, 0x96, 0xfb, 0x9e, 0x13, 0xd6, 0x7f, 0x1d, 0x94, 0x91, + 0xfe, 0x6b, 0x2b, 0x55, 0x45, 0xce, 0x5f, 0x7c, 0xf4, 0x78, 0x09, 0x8d, 0x0f, 0x61, 0xa5, 0x8a, + 0x7e, 0x1d, 0x2e, 0x6e, 0x7f, 0xb2, 0x55, 0xaf, 0xd5, 0x5b, 0x55, 0x6d, 0x74, 0xd8, 0xc9, 0x7c, + 0xee, 0xd1, 0xe3, 0xa5, 0x45, 0x5a, 0x67, 0x62, 0xe8, 0x6f, 0x80, 0xd2, 0xda, 0x56, 0xeb, 0xe5, + 0x75, 0xad, 0xb1, 0xb1, 0x52, 0x6f, 0xb1, 0xc5, 0x82, 0x61, 0x97, 0xc6, 0x36, 0x2d, 0x1d, 0xc2, + 0x46, 0xfd, 0xa3, 0x31, 0xfc, 0xb9, 0xa1, 0xfe, 0xd8, 0x3e, 0x44, 0x4b, 0x90, 0x5c, 0x6f, 0xac, + 0xa8, 0x65, 0x86, 0x3b, 0x9f, 0xcf, 0x3e, 0x7a, 0xbc, 0x94, 0xa2, 0x7a, 0xc1, 0xae, 0xca, 0xcb, + 0x3f, 0xfa, 0xcb, 0xcb, 0x33, 0x7f, 0xf5, 0x93, 0xcb, 0x33, 0x95, 0x2b, 0x5f, 0xfc, 0xe7, 0xe5, + 0x99, 0x2f, 0x8e, 0x2e, 0x4b, 0x3f, 0x3b, 0xba, 0x2c, 0x7d, 0x79, 0x74, 0x59, 0xfa, 0x8f, 0xa3, + 0xcb, 0xd2, 0x1f, 0x7d, 0x75, 0x79, 0xe6, 0x67, 0x5f, 0x5d, 0x9e, 0xf9, 0xf2, 0xab, 0xcb, 0x33, + 0x9f, 0xc6, 0xb9, 0x5d, 0xb7, 0xe3, 0xec, 0x68, 0xf8, 0xd6, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, + 0xc3, 0x5d, 0x08, 0x53, 0x8a, 0x3a, 0x00, 0x00, } diff --git a/pkg/jobs/jobspb/jobs.proto b/pkg/jobs/jobspb/jobs.proto index d1a82a90f106..cca8dedcd4d7 100644 --- a/pkg/jobs/jobspb/jobs.proto +++ b/pkg/jobs/jobspb/jobs.proto @@ -20,6 +20,7 @@ import "sql/catalog/descpb/structured.proto"; import "sql/catalog/descpb/tenant.proto"; import "util/hlc/timestamp.proto"; import "sql/schemachanger/scpb/scpb.proto"; +import "clusterversion/cluster_version.proto"; message Lease { option (gogoproto.equal) = true; @@ -322,6 +323,7 @@ message NewSchemaChangeProgress { repeated 
cockroach.sql.schemachanger.scpb.State states = 1; } + message ResumeSpanList { repeated roachpb.Span resume_spans = 1 [(gogoproto.nullable) = false]; } @@ -577,6 +579,14 @@ message CreateStatsProgress { } +message MigrationDetails { + clusterversion.ClusterVersion cluster_version = 1; +} + +message MigrationProgress { + +} + message Payload { string description = 1; // If empty, the description is assumed to be the statement. @@ -619,6 +629,7 @@ message Payload { TypeSchemaChangeDetails typeSchemaChange = 22; StreamIngestionDetails streamIngestion = 23; NewSchemaChangeDetails newSchemaChange = 24; + MigrationDetails migration = 25; } } @@ -641,6 +652,7 @@ message Progress { TypeSchemaChangeProgress typeSchemaChange = 17; StreamIngestionProgress streamIngest = 18; NewSchemaChangeProgress newSchemaChange = 19; + MigrationProgress migration = 20; } } @@ -662,6 +674,7 @@ enum Type { TYPEDESC_SCHEMA_CHANGE = 9 [(gogoproto.enumvalue_customname) = "TypeTypeSchemaChange"]; STREAM_INGESTION = 10 [(gogoproto.enumvalue_customname) = "TypeStreamIngestion"]; NEW_SCHEMA_CHANGE = 11 [(gogoproto.enumvalue_customname) = "TypeNewSchemaChange"]; + MIGRATION = 12 [(gogoproto.enumvalue_customname) = "TypeMigration"]; } message Job { diff --git a/pkg/jobs/jobspb/wrap.go b/pkg/jobs/jobspb/wrap.go index 44809875b621..e5abc838df6c 100644 --- a/pkg/jobs/jobspb/wrap.go +++ b/pkg/jobs/jobspb/wrap.go @@ -30,6 +30,7 @@ var _ Details = CreateStatsDetails{} var _ Details = SchemaChangeGCDetails{} var _ Details = StreamIngestionDetails{} var _ Details = NewSchemaChangeDetails{} +var _ Details = MigrationDetails{} // ProgressDetails is a marker interface for job progress details proto structs. 
type ProgressDetails interface{} @@ -42,6 +43,7 @@ var _ ProgressDetails = CreateStatsProgress{} var _ ProgressDetails = SchemaChangeGCProgress{} var _ ProgressDetails = StreamIngestionProgress{} var _ ProgressDetails = NewSchemaChangeProgress{} +var _ ProgressDetails = MigrationProgress{} // Type returns the payload's job type. func (p *Payload) Type() Type { @@ -75,6 +77,8 @@ func DetailsType(d isPayload_Details) Type { return TypeStreamIngestion case *Payload_NewSchemaChange: return TypeNewSchemaChange + case *Payload_Migration: + return TypeMigration default: panic(errors.AssertionFailedf("Payload.Type called on a payload with an unknown details type: %T", d)) } @@ -109,6 +113,8 @@ func WrapProgressDetails(details ProgressDetails) interface { return &Progress_StreamIngest{StreamIngest: &d} case NewSchemaChangeProgress: return &Progress_NewSchemaChange{NewSchemaChange: &d} + case MigrationProgress: + return &Progress_Migration{Migration: &d} default: panic(errors.AssertionFailedf("WrapProgressDetails: unknown details type %T", d)) } @@ -138,6 +144,8 @@ func (p *Payload) UnwrapDetails() Details { return *d.StreamIngestion case *Payload_NewSchemaChange: return *d.NewSchemaChange + case *Payload_Migration: + return *d.Migration default: return nil } @@ -167,6 +175,8 @@ func (p *Progress) UnwrapDetails() ProgressDetails { return *d.StreamIngest case *Progress_NewSchemaChange: return *d.NewSchemaChange + case *Progress_Migration: + return *d.Migration default: return nil } @@ -209,6 +219,8 @@ func WrapPayloadDetails(details Details) interface { return &Payload_StreamIngestion{StreamIngestion: &d} case NewSchemaChangeDetails: return &Payload_NewSchemaChange{NewSchemaChange: &d} + case MigrationDetails: + return &Payload_Migration{Migration: &d} default: panic(errors.AssertionFailedf("jobs.WrapPayloadDetails: unknown details type %T", d)) } @@ -244,7 +256,7 @@ const ( func (Type) SafeValue() {} // NumJobTypes is the number of jobs types. 
-const NumJobTypes = 12 +const NumJobTypes = 13 func init() { if len(Type_name) != NumJobTypes { diff --git a/pkg/keys/constants.go b/pkg/keys/constants.go index 150372342a4e..428fd0a3d466 100644 --- a/pkg/keys/constants.go +++ b/pkg/keys/constants.go @@ -399,6 +399,7 @@ const ( ScheduledJobsTableID = 37 TenantsRangesID = 38 // pseudo SqllivenessID = 39 + MigrationsID = 40 // CommentType is type for system.comments DatabaseCommentType = 0 diff --git a/pkg/migration/BUILD.bazel b/pkg/migration/BUILD.bazel index ffe77f76d105..6ae5ffb2a052 100644 --- a/pkg/migration/BUILD.bazel +++ b/pkg/migration/BUILD.bazel @@ -1,12 +1,12 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "migration", srcs = [ - "helper.go", - "manager.go", - "migrations.go", - "util.go", + "helpers.go", + "kv_migration.go", + "migration.go", + "sql_migration.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/migration", visibility = ["//visibility:public"], @@ -14,57 +14,10 @@ go_library( "//pkg/clusterversion", "//pkg/keys", "//pkg/kv", - "//pkg/kv/kvserver/liveness/livenesspb", "//pkg/roachpb", - "//pkg/rpc", - "//pkg/rpc/nodedialer", - "//pkg/server/serverpb", - "//pkg/sql", - "//pkg/sql/sqlutil", - "//pkg/util/ctxgroup", - "//pkg/util/log", - "//pkg/util/quotapool", - "@com_github_cockroachdb_errors//:errors", - "@com_github_cockroachdb_logtags//:logtags", - "@com_github_cockroachdb_redact//:redact", - "@org_golang_google_grpc//:go_default_library", - ], -) - -go_test( - name = "migration_test", - srcs = [ - "client_test.go", - "helper_test.go", - "main_test.go", - "migrations_test.go", - "util_test.go", - ], - embed = [":migration"], - deps = [ - "//pkg/base", - "//pkg/clusterversion", - "//pkg/kv", - "//pkg/kv/kvserver", - "//pkg/kv/kvserver/batcheval", - "//pkg/kv/kvserver/liveness", - "//pkg/kv/kvserver/liveness/livenesspb", - "//pkg/kv/kvserver/stateloader", - "//pkg/roachpb", - 
"//pkg/security", - "//pkg/security/securitytest", - "//pkg/server", "//pkg/server/serverpb", "//pkg/settings/cluster", - "//pkg/sql/tests", - "//pkg/testutils", - "//pkg/testutils/serverutils", - "//pkg/testutils/testcluster", - "//pkg/util/leaktest", "//pkg/util/log", - "//pkg/util/syncutil", - "@com_github_cockroachdb_errors//:errors", - "@com_github_stretchr_testify//require", - "@org_golang_google_grpc//:go_default_library", + "@com_github_cockroachdb_logtags//:logtags", ], ) diff --git a/pkg/migration/helper.go b/pkg/migration/helper.go deleted file mode 100644 index f877417d22c1..000000000000 --- a/pkg/migration/helper.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2020 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package migration - -import ( - "context" - - "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" - "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/rpc" - "github.com/cockroachdb/cockroach/pkg/rpc/nodedialer" - "github.com/cockroachdb/cockroach/pkg/server/serverpb" - "github.com/cockroachdb/cockroach/pkg/sql" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" - "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" - "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/cockroachdb/cockroach/pkg/util/quotapool" - "github.com/cockroachdb/errors" - "github.com/cockroachdb/redact" - "google.golang.org/grpc" -) - -// Helper captures all the primitives required to fully specify a migration. 
-type Helper struct { - c cluster - cv clusterversion.ClusterVersion -} - -// cluster mediates access to the crdb cluster. -type cluster interface { - // nodes returns the IDs and epochs for all nodes that are currently part of - // the cluster (i.e. they haven't been decommissioned away). Migrations have - // the pre-requisite that all nodes are up and running so that we're able to - // execute all relevant node-level operations on them. If any of the nodes - // are found to be unavailable, an error is returned. - // - // It's important to note that this makes no guarantees about new nodes - // being added to the cluster. It's entirely possible for that to happen - // concurrently with the retrieval of the current set of nodes. Appropriate - // usage of this entails wrapping it under a stabilizing loop, like we do in - // EveryNode. - nodes(ctx context.Context) (nodes, error) - - // dial returns a grpc connection to the given node. - dial(context.Context, roachpb.NodeID) (*grpc.ClientConn, error) - - // db provides access the kv.DB instance backing the cluster. - // - // TODO(irfansharif): We could hide the kv.DB instance behind an interface - // to expose only relevant, vetted bits of kv.DB. It'll make our tests less - // "integration-ey". - db() *kv.DB - - // executor provides access to an internal executor instance to run - // arbitrary SQL statements. - executor() sqlutil.InternalExecutor -} - -func newHelper(c cluster, cv clusterversion.ClusterVersion) *Helper { - return &Helper{c: c, cv: cv} -} - -// ForEveryNode is a short hand to execute the given closure (named by the -// informational parameter op) against every node in the cluster at a given -// point in time. 
Given it's possible for nodes to join or leave the cluster -// during (we don't make any guarantees for the ordering of cluster membership -// events), we only expect this to be used in conjunction with -// UntilClusterStable (see the comment there for how these two primitives can be -// put together). -func (h *Helper) ForEveryNode( - ctx context.Context, op string, fn func(context.Context, serverpb.MigrationClient) error, -) error { - ns, err := h.c.nodes(ctx) - if err != nil { - return err - } - - // We'll want to rate limit outgoing RPCs (limit pulled out of thin air). - qp := quotapool.NewIntPool("every-node", 25) - log.Infof(ctx, "executing %s on nodes %s", redact.Safe(op), ns) - grp := ctxgroup.WithContext(ctx) - - for _, node := range ns { - id := node.id // copy out of the loop variable - alloc, err := qp.Acquire(ctx, 1) - if err != nil { - return err - } - - grp.GoCtx(func(ctx context.Context) error { - defer alloc.Release() - - conn, err := h.c.dial(ctx, id) - if err != nil { - return err - } - client := serverpb.NewMigrationClient(conn) - return fn(ctx, client) - }) - } - return grp.Wait() -} - -// UntilClusterStable invokes the given closure until the cluster membership is -// stable, i.e once the set of nodes in the cluster before and after the closure -// are identical, and no nodes have restarted in the interim, we can return to -// the caller[*]. 
-// -// The mechanism for doing so, while accounting for the possibility of new nodes -// being added to the cluster in the interim, is provided by the following -// structure: -// (a) We'll retrieve the list of node IDs for all nodes in the system -// (b) We'll invoke the closure -// (c) We'll retrieve the list of node IDs again to account for the -// possibility of a new node being added during (b), or a node -// restarting -// (d) If there any discrepancies between the list retrieved in (a) -// and (c), we'll invoke the closure again -// (e) We'll continue to loop around until the node ID list stabilizes -// -// [*]: We can be a bit more precise here. What UntilClusterStable gives us is a -// strict causal happened-before relation between running the given closure and -// the next node that joins the cluster. Put another way: using -// UntilClusterStable callers will have managed to run something without a new -// node joining half-way through (which could have allowed it to pick up some -// state off one of the existing nodes that hadn't heard from us yet). -// -// To consider an example of how this primitive is used, let's consider our use -// of it to bump the cluster version. We use in conjunction with ForEveryNode, -// where after we return, we can rely on the guarantee that all nodes in the -// cluster will have their cluster versions bumped. This then implies that -// future node additions will observe the latest version (through the join RPC). -// That in turn lets us author migrations that can assume that a certain version -// gate has been enabled on all nodes in the cluster, and will always be enabled -// for any new nodes in the system. -// -// Given that it'll always be possible for new nodes to join after an -// UntilClusterStable round, it means that some migrations may have to be split -// up into two version bumps: one that phases out the old version (i.e. 
stops -// creation of stale data or behavior) and a clean-up version, which removes any -// vestiges of the stale data/behavior, and which, when active, ensures that the -// old data has vanished from the system. This is similar in spirit to how -// schema changes are split up into multiple smaller steps that are carried out -// sequentially. -func (h *Helper) UntilClusterStable(ctx context.Context, fn func() error) error { - ns, err := h.c.nodes(ctx) - if err != nil { - return err - } - - for { - if err := fn(); err != nil { - return err - } - - curNodes, err := h.c.nodes(ctx) - if err != nil { - return err - } - - if ok, diffs := ns.identical(curNodes); !ok { - log.Infof(ctx, "%s, retrying", diffs) - ns = curNodes - continue - } - - break - } - - return nil -} - -// IterateRangeDescriptors provides a handle on every range descriptor in the -// system, which callers can then use to send out arbitrary KV requests to in -// order to run arbitrary KV-level migrations. These requests will typically -// just be the `Migrate` request, with code added within [1] to do the specific -// things intended for the specified version. -// -// It's important to note that the closure is being executed in the context of a -// distributed transaction that may be automatically retried. So something like -// the following is an anti-pattern: -// -// processed := 0 -// _ = h.IterateRangeDescriptors(..., -// func(descriptors ...roachpb.RangeDescriptor) error { -// processed += len(descriptors) // we'll over count if retried -// log.Infof(ctx, "processed %d ranges", processed) -// }, -// ) -// -// Instead we allow callers to pass in a callback to signal on every attempt -// (including the first). 
This lets us salvage the example above: -// -// var processed int -// init := func() { processed = 0 } -// _ = h.IterateRangeDescriptors(..., init, -// func(descriptors ...roachpb.RangeDescriptor) error { -// processed += len(descriptors) -// log.Infof(ctx, "processed %d ranges", processed) -// }, -// ) -// -// [1]: pkg/kv/kvserver/batch_eval/cmd_migrate.go -func (h *Helper) IterateRangeDescriptors( - ctx context.Context, blockSize int, init func(), fn func(...roachpb.RangeDescriptor) error, -) error { - if err := h.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - // Inform the caller that we're starting a fresh attempt to page in - // range descriptors. - init() - - // Iterate through meta2 to pull out all the range descriptors. - return txn.Iterate(ctx, keys.Meta2Prefix, keys.MetaMax, blockSize, - func(rows []kv.KeyValue) error { - descriptors := make([]roachpb.RangeDescriptor, len(rows)) - for i, row := range rows { - if err := row.ValueProto(&descriptors[i]); err != nil { - return errors.Wrapf(err, - "unable to unmarshal range descriptor from %s", - row.Key, - ) - } - } - - // Invoke fn with the current chunk (of size ~blockSize) of - // range descriptors. - if err := fn(descriptors...); err != nil { - return err - } - - return nil - }) - }); err != nil { - return err - } - - return nil -} - -// DB provides exposes the underlying *kv.DB instance. -func (h *Helper) DB() *kv.DB { - return h.c.db() -} - -// ClusterVersion exposes the cluster version associated with the ongoing -// migration. 
-func (h *Helper) ClusterVersion() clusterversion.ClusterVersion { - return h.cv -} - -type clusterImpl struct { - nl nodeLiveness - exec sqlutil.InternalExecutor - dialer *nodedialer.Dialer - kvDB *kv.DB -} - -var _ cluster = &clusterImpl{} - -func newCluster( - nl nodeLiveness, dialer *nodedialer.Dialer, executor *sql.InternalExecutor, db *kv.DB, -) *clusterImpl { - return &clusterImpl{nl: nl, dialer: dialer, exec: executor, kvDB: db} -} - -// nodes implements the cluster interface. -func (c *clusterImpl) nodes(ctx context.Context) (nodes, error) { - var ns []node - ls, err := c.nl.GetLivenessesFromKV(ctx) - if err != nil { - return nil, err - } - for _, l := range ls { - if l.Membership.Decommissioned() { - continue - } - live, err := c.nl.IsLive(l.NodeID) - if err != nil { - return nil, err - } - if !live { - return nil, errors.Newf("n%d required, but unavailable", l.NodeID) - } - ns = append(ns, node{id: l.NodeID, epoch: l.Epoch}) - } - return ns, nil -} - -// dial implements the cluster interface. -func (c *clusterImpl) dial(ctx context.Context, id roachpb.NodeID) (*grpc.ClientConn, error) { - return c.dialer.Dial(ctx, id, rpc.DefaultClass) -} - -// db implements the cluster interface. -func (c *clusterImpl) db() *kv.DB { - return c.kvDB -} - -// executor implements the cluster interface. -func (c *clusterImpl) executor() sqlutil.InternalExecutor { - return c.exec -} diff --git a/pkg/migration/helper_test.go b/pkg/migration/helper_test.go deleted file mode 100644 index 80ffdf9aa822..000000000000 --- a/pkg/migration/helper_test.go +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright 2020 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package migration - -import ( - "context" - "fmt" - "testing" - - "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/kv" - "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" - "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server/serverpb" - "github.com/cockroachdb/cockroach/pkg/testutils" - "github.com/cockroachdb/cockroach/pkg/util/leaktest" - "github.com/cockroachdb/cockroach/pkg/util/syncutil" - "google.golang.org/grpc" -) - -func TestHelperEveryNodeUntilClusterStable(t *testing.T) { - defer leaktest.AfterTest(t) - - cv := clusterversion.ClusterVersion{} - ctx := context.Background() - var mu syncutil.Mutex - const numNodes = 3 - - t.Run("with-node-addition", func(t *testing.T) { - // Add a node mid-way through execution. We expect EveryNode to start - // over from scratch and include the newly added node. - tc := TestingNewCluster(numNodes) - h := newHelper(tc, cv) - opCount := 0 - err := h.UntilClusterStable(ctx, func() error { - return h.ForEveryNode(ctx, "dummy-op", func(context.Context, serverpb.MigrationClient) error { - mu.Lock() - defer mu.Unlock() - - opCount++ - if opCount == numNodes { - tc.addNode() - } - - return nil - }) - }) - if err != nil { - t.Fatal(err) - } - - if exp := numNodes*2 + 1; exp != opCount { - t.Fatalf("expected closure to be invoked %d times, got %d", exp, opCount) - } - }) - - t.Run("with-node-restart", func(t *testing.T) { - // Restart a node mid-way through execution. We expect EveryNode to - // start over from scratch and include the restarted node. 
- tc := TestingNewCluster(numNodes) - h := newHelper(tc, cv) - opCount := 0 - err := h.UntilClusterStable(ctx, func() error { - return h.ForEveryNode(ctx, "dummy-op", func(context.Context, serverpb.MigrationClient) error { - mu.Lock() - defer mu.Unlock() - - opCount++ - if opCount == numNodes { - tc.restartNode(2) - } - - return nil - }) - }) - if err != nil { - t.Fatal(err) - } - - if exp := numNodes * 2; exp != opCount { - t.Fatalf("expected closure to be invoked %d times, got %d", exp, opCount) - } - }) - - t.Run("with-node-downNode", func(t *testing.T) { - // Down a node mid-way through execution. We expect EveryNode to error - // out. - const downedNode = 2 - tc := TestingNewCluster(numNodes) - expRe := fmt.Sprintf("n%d required, but unavailable", downedNode) - h := newHelper(tc, cv) - opCount := 0 - if err := h.UntilClusterStable(ctx, func() error { - return h.ForEveryNode(ctx, "dummy-op", func(context.Context, serverpb.MigrationClient) error { - mu.Lock() - defer mu.Unlock() - - opCount++ - if opCount == 1 { - tc.downNode(downedNode) - } - return nil - }) - }); !testutils.IsError(err, expRe) { - t.Fatalf("expected error %q, got %q", expRe, err) - } - - tc.restartNode(downedNode) - if err := h.UntilClusterStable(ctx, func() error { - return h.ForEveryNode(ctx, "dummy-op", func(context.Context, serverpb.MigrationClient) error { - return nil - }) - }); err != nil { - t.Fatal(err) - } - }) -} - -func TestClusterNodes(t *testing.T) { - defer leaktest.AfterTest(t) - - ctx := context.Background() - const numNodes = 3 - - t.Run("retrieves-all", func(t *testing.T) { - nl := newTestNodeLiveness(numNodes) - c := clusterImpl{nl: nl} - - ns, err := c.nodes(ctx) - if err != nil { - t.Fatal(err) - } - - if got := len(ns); got != numNodes { - t.Fatalf("expected %d nodes, got %d", numNodes, got) - } - - for i := range ns { - if exp := roachpb.NodeID(i + 1); exp != ns[i].id { - t.Fatalf("expected to find node ID %s, got %s", exp, ns[i].id) - } - if ns[i].epoch != 1 { - 
t.Fatalf("expected to find epoch=1, got %d", ns[i].epoch) - } - } - }) - - t.Run("ignores-decommissioned", func(t *testing.T) { - nl := newTestNodeLiveness(numNodes) - c := clusterImpl{nl: nl} - const decommissionedNode = 3 - nl.decommission(decommissionedNode) - - ns, err := c.nodes(ctx) - if err != nil { - t.Fatal(err) - } - - if got := len(ns); got != numNodes-1 { - t.Fatalf("expected %d nodes, got %d", numNodes-1, got) - } - - for i := range ns { - if exp := roachpb.NodeID(i + 1); exp != ns[i].id { - t.Fatalf("expected to find node ID %s, got %s", exp, ns[i].id) - } - if ns[i].epoch != 1 { - t.Fatalf("expected to find epoch=1, got %d", ns[i].epoch) - } - } - }) - - t.Run("errors-if-down", func(t *testing.T) { - nl := newTestNodeLiveness(numNodes) - c := clusterImpl{nl: nl} - const downedNode = 3 - nl.downNode(downedNode) - - _, err := c.nodes(ctx) - expRe := fmt.Sprintf("n%d required, but unavailable", downedNode) - if !testutils.IsError(err, expRe) { - t.Fatalf("expected error %q, got %q", expRe, err) - } - }) -} - -// mockClusterImpl is a testing only implementation of the cluster interface. It -// lets callers mock out adding, killing, and restarting nodes in the cluster. -type mockClusterImpl struct { - nl *mockNodeLivenessImpl - *clusterImpl -} - -var _ cluster = &mockClusterImpl{} - -// TestingNewCluster is an exported a constructor for a test-only implementation -// of the cluster interface. -func TestingNewCluster(numNodes int, options ...func(*mockClusterImpl)) *mockClusterImpl { - nl := newTestNodeLiveness(numNodes) - tc := &mockClusterImpl{ - nl: nl, - clusterImpl: newCluster(nl, nil, nil, nil), - } - for _, option := range options { - option(tc) - } - return tc -} - -// TestingWithKV facilitates the creation of a test cluster backed by the given -// KV instance. -func TestingWithKV(db *kv.DB) func(*mockClusterImpl) { - return func(impl *mockClusterImpl) { - impl.clusterImpl.kvDB = db - } -} - -// dial is part of the cluster interface. 
We override it here as tests don't -// expect to make any outbound requests. -func (t *mockClusterImpl) dial(context.Context, roachpb.NodeID) (*grpc.ClientConn, error) { - return nil, nil -} - -func (t *mockClusterImpl) addNode() { - t.nl.addNode(roachpb.NodeID(len(t.nl.ls) + 1)) -} - -func (t *mockClusterImpl) downNode(id roachpb.NodeID) { - t.nl.downNode(id) -} - -func (t *mockClusterImpl) restartNode(id roachpb.NodeID) { - t.nl.restartNode(id) -} - -// mockNodeLivenessImpl is a testing-only implementation of the nodeLiveness. It -// lets tests mock out restarting, killing, decommissioning and adding nodes to -// the cluster. -type mockNodeLivenessImpl struct { - ls []livenesspb.Liveness - dead map[roachpb.NodeID]struct{} -} - -var _ nodeLiveness = &mockNodeLivenessImpl{} - -func newTestNodeLiveness(numNodes int) *mockNodeLivenessImpl { - nl := &mockNodeLivenessImpl{ - ls: make([]livenesspb.Liveness, numNodes), - dead: make(map[roachpb.NodeID]struct{}), - } - for i := 0; i < numNodes; i++ { - nl.ls[i] = livenesspb.Liveness{ - NodeID: roachpb.NodeID(i + 1), Epoch: 1, - Membership: livenesspb.MembershipStatus_ACTIVE, - } - } - return nl -} - -// GetLivenessesFromKV implements the nodeLiveness interface. -func (t *mockNodeLivenessImpl) GetLivenessesFromKV(context.Context) ([]livenesspb.Liveness, error) { - return t.ls, nil -} - -// IsLive implements the nodeLiveness interface. 
-func (t *mockNodeLivenessImpl) IsLive(id roachpb.NodeID) (bool, error) { - _, dead := t.dead[id] - return !dead, nil -} - -func (t *mockNodeLivenessImpl) decommission(id roachpb.NodeID) { - for i := range t.ls { - if t.ls[i].NodeID == id { - t.ls[i].Membership = livenesspb.MembershipStatus_DECOMMISSIONED - break - } - } -} - -func (t *mockNodeLivenessImpl) addNode(id roachpb.NodeID) { - t.ls = append(t.ls, livenesspb.Liveness{ - NodeID: id, - Epoch: 1, - Membership: livenesspb.MembershipStatus_ACTIVE, - }) -} - -func (t *mockNodeLivenessImpl) downNode(id roachpb.NodeID) { - t.dead[id] = struct{}{} -} - -func (t *mockNodeLivenessImpl) restartNode(id roachpb.NodeID) { - for i := range t.ls { - if t.ls[i].NodeID == id { - t.ls[i].Epoch++ - break - } - } - - delete(t.dead, id) -} - -// TestingNewHelper is an exported a constructor for Helper for testing -// purposes. -func TestingNewHelper(c cluster, cv clusterversion.ClusterVersion) *Helper { - return &Helper{c: c, cv: cv} -} diff --git a/pkg/migration/helpers.go b/pkg/migration/helpers.go new file mode 100644 index 000000000000..b32c76c4eb0c --- /dev/null +++ b/pkg/migration/helpers.go @@ -0,0 +1,44 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package migration + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/util/log" +) + +// FenceVersionFor constructs the appropriate "fence version" for the given +// cluster version. 
Fence versions allow the migrations infrastructure to safely +// step through consecutive cluster versions in the presence of Nodes (running +// any binary version) being added to the cluster. See the migration manager +// above for intended usage. +// +// Fence versions (and the migrations infrastructure entirely) were introduced +// in the 21.1 release cycle. In the same release cycle, we introduced the +// invariant that new user-defined versions (users being crdb engineers) must +// always have even-numbered Internal versions, thus reserving the odd numbers +// to slot in fence versions for each cluster version. See top-level +// documentation in pkg/clusterversion for more details. +func FenceVersionFor( + ctx context.Context, cv clusterversion.ClusterVersion, +) clusterversion.ClusterVersion { + if (cv.Internal % 2) != 0 { + log.Fatalf(ctx, "only even numbered internal versions allowed, found %s", cv.Version) + } + + // We'll pick the odd internal version preceding the cluster version, + // slotting ourselves right before it. + fenceCV := cv + fenceCV.Internal-- + return fenceCV +} diff --git a/pkg/migration/kv_migration.go b/pkg/migration/kv_migration.go new file mode 100644 index 000000000000..734a5a1086df --- /dev/null +++ b/pkg/migration/kv_migration.go @@ -0,0 +1,153 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package migration + +import ( + "context" + "fmt" + + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "github.com/cockroachdb/logtags" +) + +// Cluster abstracts a physical KV cluster and can be utilized by a long-running +// migration. +type Cluster interface { + + // DB returns access to the kv. + DB() *kv.DB + + // ForEveryNode is a short hand to execute the given closure (named by the + // informational parameter op) against every node in the cluster at a given + // point in time. Given it's possible for nodes to join or leave the cluster + // during (we don't make any guarantees for the ordering of cluster membership + // events), we only expect this to be used in conjunction with + // UntilClusterStable (see the comment there for how these two primitives can be + // put together). + ForEveryNode( + ctx context.Context, + op string, + fn func(context.Context, serverpb.MigrationClient) error, + ) error + + // UntilClusterStable invokes the given closure until the cluster membership + // is stable, i.e once the set of nodes in the cluster before and after the + // closure are identical, and no nodes have restarted in the interim, we can + // return to the caller[*]. 
+ // + // The mechanism for doing so, while accounting for the possibility of new + // nodes being added to the cluster in the interim, is provided by the + // following structure: + // (a) We'll retrieve the list of node IDs for all nodes in the system + // (b) We'll invoke the closure + // (c) We'll retrieve the list of node IDs again to account for the + // possibility of a new node being added during (b), or a node + // restarting + // (d) If there any discrepancies between the list retrieved in (a) + // and (c), we'll invoke the closure again + // (e) We'll continue to loop around until the node ID list stabilizes + // + // [*]: We can be a bit more precise here. What UntilClusterStable gives us is + // a strict causal happens-before relation between running the given closure + // and the next node that joins the cluster. Put another way: using + // UntilClusterStable callers will have managed to run something without a new + // node joining halfway through (which could have allowed it to pick up some + // state off one of the existing nodes that hadn't heard from us yet). + // + // To consider an example of how this primitive is used, let's consider our + // use of it to bump the cluster version. We use in conjunction with + // ForEveryNode, where after we return, we can rely on the guarantee that all + // nodes in the cluster will have their cluster versions bumped. This then + // implies that future node additions will observe the latest version (through + // the join RPC). That in turn lets us author migrations that can assume that + // a certain version gate has been enabled on all nodes in the cluster, and + // will always be enabled for any new nodes in the system. + // + // Given that it'll always be possible for new nodes to join after an + // UntilClusterStable round, it means that some migrations may have to be + // split up into two version bumps: one that phases out the old version (i.e. 
+ // stops creation of stale data or behavior) and a cleanup version, which + // removes any vestiges of the stale data/behavior, and which, when active, + // ensures that the old data has vanished from the system. This is similar in + // spirit to how schema changes are split up into multiple smaller steps that + // are carried out sequentially. + UntilClusterStable(ctx context.Context, fn func() error) error + + // IterateRangeDescriptors provides a handle on every range descriptor in the + // system, which callers can then use to send out arbitrary KV requests to in + // order to run arbitrary KV-level migrations. These requests will typically + // just be the `Migrate` request, with code added within [1] to do the + // specific things intended for the specified version. + // + // It's important to note that the closure is being executed in the context of + // a distributed transaction that may be automatically retried. So something + // like the following is an anti-pattern: + // + // processed := 0 + // _ = h.IterateRangeDescriptors(..., + // func(descriptors ...roachpb.RangeDescriptor) error { + // processed += len(descriptors) // we'll over count if retried + // log.Infof(ctx, "processed %d ranges", processed) + // }, + // ) + // + // Instead we allow callers to pass in a callback to signal on every attempt + // (including the first). This lets us salvage the example above: + // + // var processed int + // init := func() { processed = 0 } + // _ = h.IterateRangeDescriptors(..., init, + // func(descriptors ...roachpb.RangeDescriptor) error { + // processed += len(descriptors) + // log.Infof(ctx, "processed %d ranges", processed) + // }, + // ) + // + // [1]: pkg/kv/kvserver/batch_eval/cmd_migrate.go + IterateRangeDescriptors( + ctx context.Context, + size int, + init func(), + f func(descriptors ...roachpb.RangeDescriptor) error, + ) error +} + +// KVMigration is an implementation of Migration for KV-level migrations. 
+type KVMigration struct { + migration + fn KVMigrationFn +} + +// KVMigrationFn is used to perform kv-level migrations. It should only be +// run from the system tenant. +type KVMigrationFn func(context.Context, clusterversion.ClusterVersion, Cluster) error + +// NewKVMigration constructs a KVMigration. +func NewKVMigration( + description string, cv clusterversion.ClusterVersion, fn KVMigrationFn, +) *KVMigration { + return &KVMigration{ + migration: migration{ + description: description, + cv: cv, + }, + fn: fn, + } +} + +// Run kickstarts the actual migration process for KV-level migrations. +func (m *KVMigration) Run(ctx context.Context, cv clusterversion.ClusterVersion, h Cluster) error { + ctx = logtags.AddTag(ctx, fmt.Sprintf("migration=%s", cv), nil) + return m.fn(ctx, cv, h) +} diff --git a/pkg/migration/manager.go b/pkg/migration/manager.go deleted file mode 100644 index adf989710c98..000000000000 --- a/pkg/migration/manager.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2020 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -// Package migration captures the facilities needed to define and execute -// migrations for a crdb cluster. These migrations can be arbitrarily long -// running, are free to send out arbitrary requests cluster wide, change -// internal DB state, and much more. They're typically reserved for crdb -// internal operations and state. Each migration is idempotent in nature, is -// associated with a specific cluster version, and executed when the cluster -// version is made activate on every node in the cluster. 
-// -// Examples of migrations that apply would be migrations to move all raft state -// from one storage engine to another, or purging all usage of the replicated -// truncated state in KV. A "sister" package of interest is pkg/sqlmigrations. -package migration - -import ( - "context" - "fmt" - - "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/kv" - "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" - "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/rpc/nodedialer" - "github.com/cockroachdb/cockroach/pkg/server/serverpb" - "github.com/cockroachdb/cockroach/pkg/sql" - "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/cockroachdb/logtags" -) - -// Manager is the instance responsible for executing migrations across the -// cluster. -type Manager struct { - dialer *nodedialer.Dialer - nl nodeLiveness - executor *sql.InternalExecutor - db *kv.DB -} - -// nodeLiveness is the subset of the interface satisfied by CRDB's node liveness -// component that the migration manager relies upon. -type nodeLiveness interface { - GetLivenessesFromKV(context.Context) ([]livenesspb.Liveness, error) - IsLive(roachpb.NodeID) (bool, error) -} - -// NewManager constructs a new Manager. -// -// TODO(irfansharif): We'll need to eventually plumb in on a lease manager here. -func NewManager( - dialer *nodedialer.Dialer, nl nodeLiveness, executor *sql.InternalExecutor, db *kv.DB, -) *Manager { - return &Manager{ - dialer: dialer, - executor: executor, - db: db, - nl: nl, - } -} - -// Migrate runs the set of migrations required to upgrade the cluster version -// from the current version to the target one. -func (m *Manager) Migrate(ctx context.Context, from, to clusterversion.ClusterVersion) error { - // TODO(irfansharif): Should we inject every ctx here with specific labels - // for each migration, so they log distinctly? 
- ctx = logtags.AddTag(ctx, "migration-mgr", nil) - if from == to { - // Nothing to do here. - log.Infof(ctx, "no need to migrate, cluster already at newest version") - return nil - } - - // TODO(irfansharif): We'll need to acquire a lease here and refresh it - // throughout during the migration to ensure mutual exclusion. - - // TODO(irfansharif): We'll need to create a system table to store - // in-progress state of long running migrations, for introspection. - - clusterVersions := clusterversion.ListBetween(from, to) - if len(clusterVersions) == 0 { - // We're attempt to migrate to something that's not defined in cluster - // versions. This only happens in tests, when we're exercising version - // upgrades over non-existent versions (like in the cluster_version - // logictest). These tests explicitly override the - // binary{,MinSupportedVersion} in order to work. End-user attempts to - // do something similar would be caught at the sql layer (also tested in - // the same logictest). We'll just explicitly append the target version - // here instead, so that we're able to actually migrate into it. - clusterVersions = append(clusterVersions, to) - } - log.Infof(ctx, "migrating cluster from %s to %s (stepping through %s)", from, to, clusterVersions) - - for _, clusterVersion := range clusterVersions { - cluster := newCluster(m.nl, m.dialer, m.executor, m.db) - h := newHelper(cluster, clusterVersion) - - // First run the actual migration (if any). The cluster version bump - // will be rolled out afterwards. This lets us provide the invariant - // that if a version=V is active, all data is guaranteed to have - // migrated. - if migration, ok := registry[clusterVersion]; ok { - if err := migration.Run(ctx, h); err != nil { - return err - } - } - - // Next we'll push out the version gate to every node in the cluster. - // Each node will persist the version, bump the local version gates, and - // then return. 
The migration associated with the specific version is - // executed before every node in the cluster has the corresponding - // version activated. Migrations that depend on a certain version - // already being activated will need to registered using a cluster - // version greater than it. - // - // For each intermediate version, we'll need to first bump the fence - // version before bumping the "real" one. Doing so allows us to provide - // the invariant that whenever a cluster version is active, all nodes in - // the cluster (including ones added concurrently during version - // upgrades) are running binaries that know about the version. - - // Below-raft migrations mutate replica state, making use of the - // Migrate(version=V) primitive which they issue against the entire - // keyspace. These migrations typically want to rely on the invariant - // that there are no extant replicas in the system that haven't seen the - // specific Migrate command. - // - // This is partly achieved through the implementation of the Migrate - // command itself, which waits until it's applied on all followers[2] - // before returning. This also addresses the concern of extant snapshots - // with pre-migrated state possibly instantiating older version - // replicas. The intended learner replicas are listed as part of the - // range descriptor, and is also waited on for during command - // application. As for stale snapshots, if they specify a replicaID - // that's no longer part of the raft group, they're discarded by the - // recipient. Snapshots are also discarded unless they move the LAI - // forward. - // - // That still leaves rooms for replicas in the replica GC queue to evade - // detection. 
To address this, below-raft migrations typically take a - // two-phrase approach (the TruncatedAndRangeAppliedStateMigration being - // one example of this), where after having migrated the entire keyspace - // to version V, and after having prevented subsequent snapshots - // originating from replicas with versions < V, the migration sets out - // to purge outdated replicas in the system[3]. Specifically it - // processes all replicas in the GC queue with a version < V (which are - // not accessible during the application of the Migrate command). - // - // [1]: See ReplicaState.Version. - // [2]: See Replica.executeWriteBatch, specifically how proposals with the - // Migrate request are handled downstream of raft. - // [3]: See PurgeOutdatedReplicas from the Migration service. - - { - // The migrations infrastructure makes use of internal fence - // versions when stepping through consecutive versions. It's - // instructive to walk through how we expect a version migration - // from v21.1 to v21.2 to take place, and how we behave in the - // presence of new v21.1 or v21.2 nodes being added to the cluster. - // - // - All nodes are running v21.1 - // - All nodes are rolled into v21.2 binaries, but with active - // cluster version still as v21.1 - // - The first version bump will be into v21.2-1(fence), see the - // migration manager above for where that happens - // - // Then concurrently: - // - // - A new node is added to the cluster, but running binary v21.1 - // - We try bumping the cluster gates to v21.2-1(fence) - // - // If the v21.1 nodes manages to sneak in before the version bump, - // it's fine as the version bump is a no-op one (all fence versions - // are). Any subsequent bumps (including the "actual" one bumping to - // v21.2) will fail during the validation step where we'll first - // check to see that all nodes are running v21.2 binaries. 
- // - // If the v21.1 node is only added after v21.2-1(fence) is active, - // it won't be able to actually join the cluster (it'll be prevented - // by the join RPC). - // - // All of which is to say that once we've seen the node list - // stabilize (as UntilClusterStable enforces), any new nodes that - // can join the cluster will run a release that support the fence - // version, and by design also supports the actual version (which is - // the direct successor of the fence). - fenceVersion := fenceVersionFor(ctx, clusterVersion) - req := &serverpb.BumpClusterVersionRequest{ClusterVersion: &fenceVersion} - op := fmt.Sprintf("bump-cluster-version=%s", req.ClusterVersion.PrettyPrint()) - if err := h.UntilClusterStable(ctx, func() error { - return h.ForEveryNode(ctx, op, func(ctx context.Context, client serverpb.MigrationClient) error { - _, err := client.BumpClusterVersion(ctx, req) - return err - }) - }); err != nil { - return err - } - } - { - // Now sanity check that we'll actually be able to perform the real - // cluster version bump, cluster-wide. - req := &serverpb.ValidateTargetClusterVersionRequest{ClusterVersion: &clusterVersion} - op := fmt.Sprintf("validate-cluster-version=%s", req.ClusterVersion.PrettyPrint()) - if err := h.UntilClusterStable(ctx, func() error { - return h.ForEveryNode(ctx, op, func(ctx context.Context, client serverpb.MigrationClient) error { - _, err := client.ValidateTargetClusterVersion(ctx, req) - return err - }) - }); err != nil { - return err - } - } - { - // Finally, bump the real version cluster-wide. 
- req := &serverpb.BumpClusterVersionRequest{ClusterVersion: &clusterVersion} - op := fmt.Sprintf("bump-cluster-version=%s", req.ClusterVersion.PrettyPrint()) - if err := h.UntilClusterStable(ctx, func() error { - return h.ForEveryNode(ctx, op, func(ctx context.Context, client serverpb.MigrationClient) error { - _, err := client.BumpClusterVersion(ctx, req) - return err - }) - }); err != nil { - return err - } - } - } - - return nil -} diff --git a/pkg/migration/migration.go b/pkg/migration/migration.go new file mode 100644 index 000000000000..962b833f827f --- /dev/null +++ b/pkg/migration/migration.go @@ -0,0 +1,83 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// Package migration captures the facilities needed to define and execute +// migrations for a crdb cluster. These migrations can be arbitrarily long +// running, are free to send out arbitrary requests cluster wide, change +// internal DB state, and much more. They're typically reserved for crdb +// internal operations and state. Each migration is idempotent in nature, is +// associated with a specific cluster version, and executed when the cluster +// version is made active on every node in the cluster. +// +// Examples of migrations that apply would be migrations to move all raft state +// from one storage engine to another, or purging all usage of the replicated +// truncated state in KV. +package migration + +import "github.com/cockroachdb/cockroach/pkg/clusterversion" + +// Migration defines a program to be executed once every node in the cluster is +// (a) running a specific binary version, and (b) has completed all prior +// migrations. 
+//
+// Each migration is associated with a specific internal cluster version and is
+// idempotent in nature. When setting the cluster version (via `SET CLUSTER
+// SETTING version`), the manager process determines the set of migrations
+// needed to bridge the gap between the current active cluster version, and the
+// target one. See [1] for where that happens.
+//
+// To introduce a migration, start by adding a version key to pkg/clusterversion
+// and introducing a corresponding internal cluster version for it. See [2] for
+// more details. Following that, define a Migration in the migrations package
+// and add it to the appropriate migrations slice in the registry. Be sure to
+// key it in with the new cluster version we just added. During cluster
+// upgrades, once the operator is able to set a cluster version setting that's
+// past the version that was introduced (typically the major release version
+// the migration was introduced in), the manager will execute the defined
+// migration before letting the upgrade finalize.
+//
+// If the migration requires below-Raft level changes ([3] is one example),
+// you'll need to add a version switch and the relevant KV-level migration in
+// [4]. See IterateRangeDescriptors and the Migrate KV request for more details.
+//
+// [1]: `(*Manager).Migrate`
+// [2]: pkg/clusterversion/cockroach_versions.go
+// [3]: truncatedStateMigration
+// [4]: pkg/kv/kvserver/batch_eval/cmd_migrate.go
+//
+type Migration interface {
+	ClusterVersion() clusterversion.ClusterVersion
+
+	internal() // restrict implementations to this package
+}
+
+// JobDeps are migration-specific dependencies used by the migration job to run
+// migrations.
+type JobDeps interface {
+
+	// GetMigration returns the migration associated with the cluster version
+	// if one exists.
+	GetMigration(key clusterversion.ClusterVersion) (Migration, bool)
+
+	// Cluster returns a handle to the cluster on a system tenant.
+ Cluster() Cluster +} + +type migration struct { + description string + cv clusterversion.ClusterVersion +} + +// ClusterVersion makes KVMigration a Migration. +func (m *migration) ClusterVersion() clusterversion.ClusterVersion { + return m.cv +} + +func (m *migration) internal() {} diff --git a/pkg/migration/migrationcluster/BUILD.bazel b/pkg/migration/migrationcluster/BUILD.bazel new file mode 100644 index 000000000000..8c69648729bf --- /dev/null +++ b/pkg/migration/migrationcluster/BUILD.bazel @@ -0,0 +1,53 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "migrationcluster", + srcs = [ + "cluster.go", + "nodes.go", + ], + importpath = "github.com/cockroachdb/cockroach/pkg/migration/migrationcluster", + visibility = ["//visibility:public"], + deps = [ + "//pkg/keys", + "//pkg/kv", + "//pkg/kv/kvserver/liveness/livenesspb", + "//pkg/roachpb", + "//pkg/rpc", + "//pkg/server/serverpb", + "//pkg/util/ctxgroup", + "//pkg/util/log", + "//pkg/util/quotapool", + "@com_github_cockroachdb_errors//:errors", + "@com_github_cockroachdb_redact//:redact", + "@org_golang_google_grpc//:go_default_library", + ], +) + +go_test( + name = "migrationcluster_test", + srcs = [ + "client_test.go", + "helper_test.go", + "main_test.go", + "nodes_test.go", + ], + embed = [":migrationcluster"], + deps = [ + "//pkg/kv/kvserver", + "//pkg/migration/nodelivenesstest", + "//pkg/roachpb", + "//pkg/rpc", + "//pkg/security", + "//pkg/security/securitytest", + "//pkg/server", + "//pkg/server/serverpb", + "//pkg/sql/tests", + "//pkg/testutils", + "//pkg/testutils/serverutils", + "//pkg/testutils/testcluster", + "//pkg/util/leaktest", + "//pkg/util/syncutil", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/migration/client_test.go b/pkg/migration/migrationcluster/client_test.go similarity index 78% rename from pkg/migration/client_test.go rename to pkg/migration/migrationcluster/client_test.go index a56a1c490a18..e6d114c3fa75 
100644 --- a/pkg/migration/client_test.go +++ b/pkg/migration/migrationcluster/client_test.go @@ -7,25 +7,25 @@ // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package migration_test + +package migrationcluster_test import ( "context" "testing" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" - "github.com/cockroachdb/cockroach/pkg/migration" + "github.com/cockroachdb/cockroach/pkg/migration/migrationcluster" + "github.com/cockroachdb/cockroach/pkg/migration/nodelivenesstest" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" ) -func TestHelperIterateRangeDescriptors(t *testing.T) { +func TestCluster_IterateRangeDescriptors(t *testing.T) { defer leaktest.AfterTest(t) - cv := clusterversion.ClusterVersion{} ctx := context.Background() const numNodes = 1 @@ -41,8 +41,12 @@ func TestHelperIterateRangeDescriptors(t *testing.T) { t.Fatal(err) } - c := migration.TestingNewCluster(numNodes, migration.TestingWithKV(kvDB)) - h := migration.TestingNewHelper(c, cv) + c := nodelivenesstest.New(numNodes) + h := migrationcluster.New(migrationcluster.ClusterConfig{ + NodeLiveness: c, + Dialer: migrationcluster.NoopDialer{}, + DB: kvDB, + }) for _, blockSize := range []int{1, 5, 10, 50} { var numDescs int diff --git a/pkg/migration/migrationcluster/cluster.go b/pkg/migration/migrationcluster/cluster.go new file mode 100644 index 000000000000..9f22204a0f8c --- /dev/null +++ b/pkg/migration/migrationcluster/cluster.go @@ -0,0 +1,172 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package migrationcluster + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/rpc" + "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/quotapool" + "github.com/cockroachdb/errors" + "github.com/cockroachdb/redact" + "google.golang.org/grpc" +) + +// Cluster mediates interacting with a cockroach cluster. +type Cluster struct { + c ClusterConfig +} + +// ClusterConfig configures a Cluster. +type ClusterConfig struct { + + // NodeLiveness is used to determine the set of nodes in the cluster. + NodeLiveness NodeLiveness + + // Dialer constructs connections to other nodes. + Dialer NodeDialer + + // DB provides access the kv.DB instance backing the cluster. + // + // TODO(irfansharif): We could hide the kv.DB instance behind an interface + // to expose only relevant, vetted bits of kv.DB. It'll make our tests less + // "integration-ey". + DB *kv.DB +} + +// NodeDialer abstracts connecting to other nodes in the cluster. +type NodeDialer interface { + // Dial returns a grpc connection to the given node. + Dial(context.Context, roachpb.NodeID, rpc.ConnectionClass) (*grpc.ClientConn, error) +} + +// NodeLiveness is the subset of the interface satisfied by CRDB's node liveness +// component that the migration manager relies upon. 
+type NodeLiveness interface { + GetLivenessesFromKV(context.Context) ([]livenesspb.Liveness, error) + IsLive(roachpb.NodeID) (bool, error) +} + +// New constructs a new Cluster with the provided dependencies. +func New(cfg ClusterConfig) *Cluster { + return &Cluster{c: cfg} +} + +// UntilClusterStable is part of the migration.Cluster interface. +func (c *Cluster) UntilClusterStable(ctx context.Context, fn func() error) error { + ns, err := NodesFromNodeLiveness(ctx, c.c.NodeLiveness) + if err != nil { + return err + } + + for { + if err := fn(); err != nil { + return err + } + curNodes, err := NodesFromNodeLiveness(ctx, c.c.NodeLiveness) + if err != nil { + return err + } + + if ok, diffs := ns.Identical(curNodes); !ok { + log.Infof(ctx, "%s, retrying", diffs) + ns = curNodes + continue + } + break + } + return nil +} + +// ForEveryNode is part of the migration.Cluster interface. +func (c *Cluster) ForEveryNode( + ctx context.Context, op string, fn func(context.Context, serverpb.MigrationClient) error, +) error { + + ns, err := NodesFromNodeLiveness(ctx, c.c.NodeLiveness) + if err != nil { + return err + } + + // We'll want to rate limit outgoing RPCs (limit pulled out of thin air). + qp := quotapool.NewIntPool("every-node", 25) + log.Infof(ctx, "executing %s on nodes %s", redact.Safe(op), ns) + grp := ctxgroup.WithContext(ctx) + + for _, node := range ns { + id := node.ID // copy out of the loop variable + alloc, err := qp.Acquire(ctx, 1) + if err != nil { + return err + } + + grp.GoCtx(func(ctx context.Context) error { + defer alloc.Release() + + conn, err := c.c.Dialer.Dial(ctx, id, rpc.DefaultClass) + if err != nil { + return err + } + client := serverpb.NewMigrationClient(conn) + return fn(ctx, client) + }) + } + return grp.Wait() +} + +// IterateRangeDescriptors is part of the migration.Cluster interface. 
+func (c *Cluster) IterateRangeDescriptors(
+	ctx context.Context, blockSize int, init func(), fn func(...roachpb.RangeDescriptor) error,
+) error {
+	if err := c.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
+		// Inform the caller that we're starting a fresh attempt to page in
+		// range descriptors.
+		init()
+
+		// Iterate through meta2 to pull out all the range descriptors.
+		return txn.Iterate(ctx, keys.Meta2Prefix, keys.MetaMax, blockSize,
+			func(rows []kv.KeyValue) error {
+				descriptors := make([]roachpb.RangeDescriptor, len(rows))
+				for i, row := range rows {
+					if err := row.ValueProto(&descriptors[i]); err != nil {
+						return errors.Wrapf(err,
+							"unable to unmarshal range descriptor from %s",
+							row.Key,
+						)
+					}
+				}
+
+				// Invoke fn with the current chunk (of size ~blockSize) of
+				// range descriptors.
+				if err := fn(descriptors...); err != nil {
+					return err
+				}
+
+				return nil
+			})
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DB exposes the underlying *kv.DB instance.
+func (c *Cluster) DB() *kv.DB {
+	return c.c.DB
+}
diff --git a/pkg/migration/migrationcluster/helper_test.go b/pkg/migration/migrationcluster/helper_test.go
new file mode 100644
index 000000000000..7a3a405e1d4f
--- /dev/null
+++ b/pkg/migration/migrationcluster/helper_test.go
@@ -0,0 +1,215 @@
+// Copyright 2020 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+ +package migrationcluster + +import ( + "context" + "fmt" + "testing" + + "github.com/cockroachdb/cockroach/pkg/migration/nodelivenesstest" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/rpc" + "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "google.golang.org/grpc" +) + +type NoopDialer struct{} + +func (n NoopDialer) Dial( + ctx context.Context, id roachpb.NodeID, class rpc.ConnectionClass, +) (*grpc.ClientConn, error) { + return nil, nil +} + +var _ NodeDialer = NoopDialer{} + +func TestHelperEveryNode(t *testing.T) { + defer leaktest.AfterTest(t) + + ctx := context.Background() + var mu syncutil.Mutex + const numNodes = 3 + + t.Run("with-node-addition", func(t *testing.T) { + // Add a node mid-way through execution. We expect EveryNode to start + // over from scratch and include the newly added node. + tc := nodelivenesstest.New(numNodes) + h := New(ClusterConfig{ + NodeLiveness: tc, + Dialer: NoopDialer{}, + }) + opCount := 0 + err := h.UntilClusterStable(ctx, func() error { + return h.ForEveryNode(ctx, "dummy-op", func( + context.Context, serverpb.MigrationClient, + ) error { + mu.Lock() + defer mu.Unlock() + + opCount++ + if opCount == numNodes { + tc.AddNewNode() + } + + return nil + }) + }) + if err != nil { + t.Fatal(err) + } + + if exp := numNodes*2 + 1; exp != opCount { + t.Fatalf("expected closure to be invoked %d times, got %d", exp, opCount) + } + }) + + t.Run("with-node-restart", func(t *testing.T) { + // Restart a node mid-way through execution. We expect EveryNode to + // start over from scratch and include the restarted node. 
+ tc := nodelivenesstest.New(numNodes) + h := New(ClusterConfig{ + NodeLiveness: tc, + Dialer: NoopDialer{}, + }) + opCount := 0 + err := h.UntilClusterStable(ctx, func() error { + return h.ForEveryNode(ctx, "dummy-op", func( + context.Context, serverpb.MigrationClient, + ) error { + mu.Lock() + defer mu.Unlock() + + opCount++ + if opCount == numNodes { + tc.RestartNode(2) + } + + return nil + }) + }) + if err != nil { + t.Fatal(err) + } + + if exp := numNodes * 2; exp != opCount { + t.Fatalf("expected closure to be invoked %d times, got %d", exp, opCount) + } + }) + + t.Run("with-node-downNode", func(t *testing.T) { + // Down a node mid-way through execution. We expect EveryNode to error + // out. + const downedNode = 2 + tc := nodelivenesstest.New(numNodes) + h := New(ClusterConfig{ + NodeLiveness: tc, + Dialer: NoopDialer{}, + }) + expRe := fmt.Sprintf("n%d required, but unavailable", downedNode) + opCount := 0 + if err := h.UntilClusterStable(ctx, func() error { + return h.ForEveryNode(ctx, "dummy-op", func( + context.Context, serverpb.MigrationClient, + ) error { + mu.Lock() + defer mu.Unlock() + + opCount++ + if opCount == 1 { + tc.DownNode(downedNode) + } + return nil + }) + }); !testutils.IsError(err, expRe) { + t.Fatalf("expected error %q, got %q", expRe, err) + } + + tc.RestartNode(downedNode) + if err := h.UntilClusterStable(ctx, func() error { + return h.ForEveryNode(ctx, "dummy-op", func( + context.Context, serverpb.MigrationClient, + ) error { + return nil + }) + }); err != nil { + t.Fatal(err) + } + }) +} + +func TestClusterNodes(t *testing.T) { + defer leaktest.AfterTest(t) + + ctx := context.Background() + const numNodes = 3 + + t.Run("retrieves-all", func(t *testing.T) { + nl := nodelivenesstest.New(numNodes) + ns, err := NodesFromNodeLiveness(ctx, nl) + if err != nil { + t.Fatal(err) + } + + if got := len(ns); got != numNodes { + t.Fatalf("expected %d Nodes, got %d", numNodes, got) + } + + for i := range ns { + if exp := roachpb.NodeID(i + 1); 
exp != ns[i].ID { + t.Fatalf("expected to find node ID %s, got %s", exp, ns[i].ID) + } + if ns[i].Epoch != 1 { + t.Fatalf("expected to find Epoch=1, got %d", ns[i].Epoch) + } + } + }) + + t.Run("ignores-decommissioned", func(t *testing.T) { + nl := nodelivenesstest.New(numNodes) + + const decommissionedNode = 3 + nl.Decommission(decommissionedNode) + + ns, err := NodesFromNodeLiveness(ctx, nl) + if err != nil { + t.Fatal(err) + } + + if got := len(ns); got != numNodes-1 { + t.Fatalf("expected %d Nodes, got %d", numNodes-1, got) + } + + for i := range ns { + if exp := roachpb.NodeID(i + 1); exp != ns[i].ID { + t.Fatalf("expected to find node ID %s, got %s", exp, ns[i].ID) + } + if ns[i].Epoch != 1 { + t.Fatalf("expected to find Epoch=1, got %d", ns[i].Epoch) + } + } + }) + + t.Run("errors-if-down", func(t *testing.T) { + nl := nodelivenesstest.New(numNodes) + const downedNode = 3 + nl.DownNode(downedNode) + + _, err := NodesFromNodeLiveness(ctx, nl) + expRe := fmt.Sprintf("n%d required, but unavailable", downedNode) + if !testutils.IsError(err, expRe) { + t.Fatalf("expected error %q, got %q", expRe, err) + } + }) +} diff --git a/pkg/migration/migrationcluster/main_test.go b/pkg/migration/migrationcluster/main_test.go new file mode 100644 index 000000000000..0f87ea997cad --- /dev/null +++ b/pkg/migration/migrationcluster/main_test.go @@ -0,0 +1,29 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package migrationcluster_test + +import ( + "os" + "testing" + + "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/security/securitytest" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" +) + +func TestMain(m *testing.M) { + security.SetAssetLoader(securitytest.EmbeddedAssets) + serverutils.InitTestServerFactory(server.TestServerFactory) + serverutils.InitTestClusterFactory(testcluster.TestClusterFactory) + os.Exit(m.Run()) +} diff --git a/pkg/migration/migrationcluster/nodes.go b/pkg/migration/migrationcluster/nodes.go new file mode 100644 index 000000000000..15b789f90ec1 --- /dev/null +++ b/pkg/migration/migrationcluster/nodes.go @@ -0,0 +1,118 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package migrationcluster + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/errors" + "github.com/cockroachdb/redact" +) + +// Node captures the relevant bits of each node as it pertains to the migration +// infrastructure. +type Node struct { + ID roachpb.NodeID + Epoch int64 +} + +// Nodes is a collection of node objects. +type Nodes []Node + +// NodesFromNodeLiveness returns the IDs and epochs for all nodes that are +// currently part of the cluster (i.e. they haven't been decommissioned away). +// Migrations have the pre-requisite that all nodes are up and running so that +// we're able to execute all relevant node-level operations on them. If any of +// the nodes are found to be unavailable, an error is returned. 
+// +// It's important to note that this makes no guarantees about new nodes +// being added to the cluster. It's entirely possible for that to happen +// concurrently with the retrieval of the current set of nodes. Appropriate +// usage of this entails wrapping it under a stabilizing loop, like we do in +// EveryNode. +func NodesFromNodeLiveness(ctx context.Context, nl NodeLiveness) (Nodes, error) { + var ns []Node + ls, err := nl.GetLivenessesFromKV(ctx) + if err != nil { + return nil, err + } + for _, l := range ls { + if l.Membership.Decommissioned() { + continue + } + live, err := nl.IsLive(l.NodeID) + if err != nil { + return nil, err + } + if !live { + return nil, errors.Newf("n%d required, but unavailable", l.NodeID) + } + ns = append(ns, Node{ID: l.NodeID, Epoch: l.Epoch}) + } + return ns, nil +} + +// Identical returns whether or not two lists of Nodes are identical as sets, +// and if not, what changed (in terms of cluster membership operations and epoch +// changes). The textual diffs are only to be used for logging purposes. 
+func (ns Nodes) Identical(other Nodes) (ok bool, _ []redact.RedactableString) { + a, b := ns, other + + type ent struct { + node Node + count int + epochChanged bool + } + m := map[roachpb.NodeID]ent{} + for _, node := range a { + m[node.ID] = ent{count: 1, node: node, epochChanged: false} + } + for _, node := range b { + e, ok := m[node.ID] + e.count-- + if ok && e.node.Epoch != node.Epoch { + e.epochChanged = true + } + m[node.ID] = e + } + + var diffs []redact.RedactableString + for id, e := range m { + if e.epochChanged { + diffs = append(diffs, redact.Sprintf("n%d's Epoch changed", id)) + } + if e.count > 0 { + diffs = append(diffs, redact.Sprintf("n%d was decommissioned", id)) + } + if e.count < 0 { + diffs = append(diffs, redact.Sprintf("n%d joined the cluster", id)) + } + } + + return len(diffs) == 0, diffs +} + +func (ns Nodes) String() string { + return redact.StringWithoutMarkers(ns) +} + +// SafeFormat implements redact.SafeFormatter. +func (ns Nodes) SafeFormat(s redact.SafePrinter, _ rune) { + s.SafeString("n{") + if len(ns) > 0 { + s.Printf("%d", ns[0].ID) + for _, node := range ns[1:] { + s.Printf(",%d", node.ID) + } + } + s.SafeString("}") +} diff --git a/pkg/migration/util_test.go b/pkg/migration/migrationcluster/nodes_test.go similarity index 78% rename from pkg/migration/util_test.go rename to pkg/migration/migrationcluster/nodes_test.go index 98d201da3935..dbd2713b8796 100644 --- a/pkg/migration/util_test.go +++ b/pkg/migration/migrationcluster/nodes_test.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. 
-package migration +package migrationcluster import ( "sort" @@ -23,16 +23,16 @@ import ( func TestNodesString(t *testing.T) { defer leaktest.AfterTest(t) - ns := func(ids ...int) nodes { - var nodes []node + ns := func(ids ...int) Nodes { + var nodes []Node for _, id := range ids { - nodes = append(nodes, node{id: roachpb.NodeID(id)}) + nodes = append(nodes, Node{ID: roachpb.NodeID(id)}) } return nodes } var tests = []struct { - ns nodes + ns Nodes exp string }{ {ns(), "n{}"}, @@ -51,8 +51,8 @@ func TestNodesString(t *testing.T) { func TestNodesIdentical(t *testing.T) { defer leaktest.AfterTest(t) - list := func(nodes ...string) nodes { // takes in strings of the form "id@epoch" - var ns []node + list := func(nodes ...string) Nodes { // takes in strings of the form "ID@Epoch" + var ns []Node for _, n := range nodes { parts := strings.Split(n, "@") id, err := strconv.Atoi(parts[0]) @@ -63,29 +63,29 @@ func TestNodesIdentical(t *testing.T) { if err != nil { t.Fatal(err) } - ns = append(ns, node{id: roachpb.NodeID(id), epoch: int64(epoch)}) + ns = append(ns, Node{ID: roachpb.NodeID(id), Epoch: int64(epoch)}) } return ns } var tests = []struct { - a, b nodes + a, b Nodes expOk bool expDiff string }{ {list(), list(), true, ""}, {list("1@2"), list("1@2"), true, ""}, {list("2@1", "1@2"), list("1@2", "2@1"), true, ""}, - {list("1@2"), list("1@3"), false, "n1's epoch changed"}, + {list("1@2"), list("1@3"), false, "n1's Epoch changed"}, {list("1@2"), list("1@2", "2@1"), false, "n2 joined the cluster"}, {list("1@1", "2@1"), list("1@1"), false, "n2 was decommissioned"}, - {list("3@2", "4@6"), list("4@8", "5@2"), false, "n3 was decommissioned, n4's epoch changed, n5 joined the cluster"}, + {list("3@2", "4@6"), list("4@8", "5@2"), false, "n3 was decommissioned, n4's Epoch changed, n5 joined the cluster"}, } for _, test := range tests { - ok, diffs := test.a.identical(test.b) + ok, diffs := test.a.Identical(test.b) if ok != test.expOk { - t.Fatalf("expected identical = %t, got 
%t", test.expOk, ok) + t.Fatalf("expected Identical = %t, got %t", test.expOk, ok) } strDiffs := make([]string, len(diffs)) diff --git a/pkg/migration/migrationjob/BUILD.bazel b/pkg/migration/migrationjob/BUILD.bazel new file mode 100644 index 000000000000..8b7717c32bae --- /dev/null +++ b/pkg/migration/migrationjob/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "migrationjob", + srcs = ["migration_job.go"], + importpath = "github.com/cockroachdb/cockroach/pkg/migration/migrationjob", + visibility = ["//visibility:public"], + deps = [ + "//pkg/clusterversion", + "//pkg/jobs", + "//pkg/jobs/jobspb", + "//pkg/kv", + "//pkg/migration", + "//pkg/security", + "//pkg/settings/cluster", + "//pkg/sql", + "//pkg/sql/sem/tree", + "//pkg/sql/sessiondata", + "//pkg/sql/sqlutil", + "//pkg/util/timeutil", + "@com_github_cockroachdb_errors//:errors", + ], +) diff --git a/pkg/migration/migrationjob/migration_job.go b/pkg/migration/migrationjob/migration_job.go new file mode 100644 index 000000000000..f757766faa8b --- /dev/null +++ b/pkg/migration/migrationjob/migration_job.go @@ -0,0 +1,165 @@ +// Copyright 2018 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// Package migrationjob contains the jobs.Resumer implementation +// used for long-running migrations. 
+package migrationjob + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/migration" + "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" + "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/errors" +) + +func init() { + jobs.RegisterConstructor(jobspb.TypeMigration, func(job *jobs.Job, settings *cluster.Settings) jobs.Resumer { + return &resumer{j: job} + }) +} + +// NewRecord constructs a new jobs.Record for this migration. +func NewRecord(version clusterversion.ClusterVersion, user security.SQLUsername) jobs.Record { + return jobs.Record{ + Description: "Migration to " + version.String(), + Details: jobspb.MigrationDetails{ + ClusterVersion: &version, + }, + Username: user, + Progress: jobspb.MigrationProgress{}, + NonCancelable: true, + } +} + +type resumer struct { + j *jobs.Job +} + +var _ jobs.Resumer = (*resumer)(nil) + +func (r resumer) Resume(ctx context.Context, execCtxI interface{}) error { + + execCtx := execCtxI.(sql.JobExecContext) + pl := r.j.Payload() + cv := *pl.GetMigration().ClusterVersion + ie := execCtx.ExecCfg().InternalExecutor + + alreadyCompleted, err := CheckIfMigrationCompleted(ctx, nil /* txn */, ie, cv) + if alreadyCompleted || err != nil { + return errors.Wrapf(err, "checking migration completion for %v", cv) + } + mc := execCtx.MigrationJobDeps() + m, ok := mc.GetMigration(cv) + if !ok { + // TODO(ajwerner): Consider treating this as an assertion failure. 
Jobs + // should only be created for a cluster version if there is an associated + // migration. It seems possible that a migration job could be launched by + // a node running an older version where a migration then runs on a job + // with a newer version where the migration has been re-ordered to be later. + // This should only happen between alphas but is theoretically not illegal. + return nil + } + switch m := m.(type) { + case *migration.KVMigration: + err = m.Run(ctx, cv, mc.Cluster()) + case *migration.SQLMigration: + err = m.Run(ctx, cv, migration.SQLDeps{ + DB: execCtx.ExecCfg().DB, + Codec: execCtx.ExecCfg().Codec, + Settings: execCtx.ExecCfg().Settings, + }) + default: + return errors.AssertionFailedf("unknown migration type %T", m) + } + if err != nil { + return errors.Wrapf(err, "running migration for %v", cv) + } + + // Mark the migration as having been completed so that subsequent iterations + // no-op and new jobs are not created. + if err := markMigrationCompleted(ctx, ie, cv); err != nil { + return errors.Wrapf(err, "marking migration complete for %v", cv) + } + return nil +} + +// CheckIfMigrationCompleted queries the system.migrations table to determine +// if the migration associated with this version has already been completed. +// The txn may be nil, in which case the check will be run in its own +// transaction.
+func CheckIfMigrationCompleted( + ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, cv clusterversion.ClusterVersion, +) (alreadyCompleted bool, _ error) { + row, err := ie.QueryRow( + ctx, + "migration-job-find-already-completed", + txn, + ` +SELECT EXISTS( + SELECT * + FROM system.migrations + WHERE major = $1 + AND minor = $2 + AND patch = $3 + AND internal = $4 + ); +`, + cv.Major, + cv.Minor, + cv.Patch, + cv.Internal) + if err != nil { + return false, err + } + return bool(*row[0].(*tree.DBool)), nil +} + +func markMigrationCompleted( + ctx context.Context, ie sqlutil.InternalExecutor, cv clusterversion.ClusterVersion, +) error { + _, err := ie.ExecEx( + ctx, + "migration-job-mark-job-succeeded", + nil, /* txn */ + sessiondata.NodeUserSessionDataOverride, + ` +INSERT + INTO system.migrations + ( + major, + minor, + patch, + internal, + completed_at + ) +VALUES ($1, $2, $3, $4, $5)`, + cv.Major, + cv.Minor, + cv.Patch, + cv.Internal, + timeutil.Now()) + return err +} + +// The long-running migration resumer has no reverting logic. 
+func (r resumer) OnFailOrCancel(ctx context.Context, execCtx interface{}) error { + return nil +} diff --git a/pkg/migration/migrationmanager/BUILD.bazel b/pkg/migration/migrationmanager/BUILD.bazel new file mode 100644 index 000000000000..57064843ba0f --- /dev/null +++ b/pkg/migration/migrationmanager/BUILD.bazel @@ -0,0 +1,65 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "migrationmanager", + srcs = [ + "manager.go", + "testing_knobs.go", + ], + importpath = "github.com/cockroachdb/cockroach/pkg/migration/migrationmanager", + visibility = ["//visibility:public"], + deps = [ + "//pkg/base", + "//pkg/clusterversion", + "//pkg/jobs", + "//pkg/keys", + "//pkg/kv", + "//pkg/migration", + "//pkg/migration/migrationjob", + "//pkg/migration/migrations", + "//pkg/security", + "//pkg/server/serverpb", + "//pkg/settings/cluster", + "//pkg/sql/protoreflect", + "//pkg/sql/sem/tree", + "//pkg/sql/sqlutil", + "//pkg/util/log", + "@com_github_cockroachdb_errors//:errors", + "@com_github_cockroachdb_logtags//:logtags", + "@com_github_cockroachdb_redact//:redact", + ], +) + +go_test( + name = "migrationmanager_test", + srcs = [ + "main_test.go", + "manager_external_test.go", + ], + deps = [ + ":migrationmanager", + "//pkg/base", + "//pkg/clusterversion", + "//pkg/jobs", + "//pkg/kv/kvserver/batcheval", + "//pkg/kv/kvserver/liveness", + "//pkg/migration", + "//pkg/roachpb", + "//pkg/security", + "//pkg/security/securitytest", + "//pkg/server", + "//pkg/settings/cluster", + "//pkg/sql/sqlutil", + "//pkg/testutils", + "//pkg/testutils/serverutils", + "//pkg/testutils/sqlutils", + "//pkg/testutils/testcluster", + "//pkg/util", + "//pkg/util/leaktest", + "//pkg/util/log", + "//pkg/util/tracing", + "@com_github_cockroachdb_errors//:errors", + "@com_github_stretchr_testify//require", + "@org_golang_x_sync//errgroup", + ], +) diff --git a/pkg/migration/migrationmanager/main_test.go b/pkg/migration/migrationmanager/main_test.go new file mode 
100644 index 000000000000..a99ad25333fb --- /dev/null +++ b/pkg/migration/migrationmanager/main_test.go @@ -0,0 +1,29 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package migrationmanager_test + +import ( + "os" + "testing" + + "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/security/securitytest" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" +) + +func TestMain(m *testing.M) { + security.SetAssetLoader(securitytest.EmbeddedAssets) + serverutils.InitTestServerFactory(server.TestServerFactory) + serverutils.InitTestClusterFactory(testcluster.TestClusterFactory) + os.Exit(m.Run()) +} diff --git a/pkg/migration/migrationmanager/manager.go b/pkg/migration/migrationmanager/manager.go new file mode 100644 index 000000000000..893ce37a8d41 --- /dev/null +++ b/pkg/migration/migrationmanager/manager.go @@ -0,0 +1,352 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// Package migrationmanager provides an implementation of migration.Manager +// for use on kv nodes. 
+package migrationmanager + +import ( + "context" + "fmt" + + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/migration" + "github.com/cockroachdb/cockroach/pkg/migration/migrationjob" + "github.com/cockroachdb/cockroach/pkg/migration/migrations" + "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/protoreflect" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/errors" + "github.com/cockroachdb/logtags" + "github.com/cockroachdb/redact" +) + +// Manager is the instance responsible for executing migrations across the +// cluster. +type Manager struct { + c migration.Cluster + ie sqlutil.InternalExecutor + jr *jobs.Registry + codec keys.SQLCodec + settings *cluster.Settings + knobs TestingKnobs +} + +// GetMigration returns the migration associated with this key. +func (m *Manager) GetMigration(key clusterversion.ClusterVersion) (migration.Migration, bool) { + if m.knobs.RegistryOverride != nil { + if m, ok := m.knobs.RegistryOverride(key); ok { + return m, ok + } + } + return migrations.GetMigration(key) +} + +// Cluster returns the cluster associated with this manager. It may be nil +// in a secondary tenant. +func (m *Manager) Cluster() migration.Cluster { + return m.c +} + +// NewManager constructs a new Manager. The Cluster parameter may be nil in +// secondary tenants. The testingKnobs parameter may be nil. 
+func NewManager( + c migration.Cluster, + ie sqlutil.InternalExecutor, + jr *jobs.Registry, + codec keys.SQLCodec, + settings *cluster.Settings, + testingKnobs *TestingKnobs, +) *Manager { + var knobs TestingKnobs + if testingKnobs != nil { + knobs = *testingKnobs + } + return &Manager{ + c: c, + ie: ie, + jr: jr, + codec: codec, + settings: settings, + knobs: knobs, + } +} + +var _ migration.JobDeps = (*Manager)(nil) + +// Migrate runs the set of migrations required to upgrade the cluster version +// from the current version to the target one. +func (m *Manager) Migrate( + ctx context.Context, user security.SQLUsername, from, to clusterversion.ClusterVersion, +) error { + // TODO(irfansharif): Should we inject every ctx here with specific labels + // for each migration, so they log distinctly? + ctx = logtags.AddTag(ctx, "migration-mgr", nil) + if from == to { + // Nothing to do here. + log.Infof(ctx, "no need to migrate, cluster already at newest version") + return nil + } + + clusterVersions := m.listBetween(from, to) + log.Infof(ctx, "migrating cluster from %s to %s (stepping through %s)", from, to, clusterVersions) + + for _, clusterVersion := range clusterVersions { + log.Infof(ctx, "stepping through %s", clusterVersion) + // First, run the actual migration if any. + if err := m.runMigration(ctx, user, clusterVersion); err != nil { + return err + } + + // Next we'll push out the version gate to every node in the cluster. + // Each node will persist the version, bump the local version gates, and + // then return. The migration associated with the specific version is + // executed before every node in the cluster has the corresponding + // version activated. Migrations that depend on a certain version + // already being activated will need to be registered using a cluster + // version greater than it. + // + // For each intermediate version, we'll need to first bump the fence + // version before bumping the "real" one.
Doing so allows us to provide + // the invariant that whenever a cluster version is active, all Nodes in + // the cluster (including ones added concurrently during version + // upgrades) are running binaries that know about the version. + + // Below-raft migrations mutate replica state, making use of the + // Migrate(version=V) primitive which they issue against the entire + // keyspace. These migrations typically want to rely on the invariant + // that there are no extant replicas in the system that haven't seen the + // specific Migrate command. + // + // This is partly achieved through the implementation of the Migrate + // command itself, which waits until it's applied on all followers[2] + // before returning. This also addresses the concern of extant snapshots + // with pre-migrated state possibly instantiating older version + // replicas. The intended learner replicas are listed as part of the + // range descriptor, and is also waited on for during command + // application. As for stale snapshots, if they specify a replicaID + // that's no longer part of the raft group, they're discarded by the + // recipient. Snapshots are also discarded unless they move the LAI + // forward. + // + // That still leaves room for replicas in the replica GC queue to evade + // detection. To address this, below-raft migrations typically take a + // two-phase approach (the TruncatedAndRangeAppliedStateMigration being + // one example of this), where after having migrated the entire keyspace + // to version V, and after having prevented subsequent snapshots + // originating from replicas with versions < V, the migration sets out + // to purge outdated replicas in the system[3]. Specifically it + // processes all replicas in the GC queue with a version < V (which are + // not accessible during the application of the Migrate command). + // + // [1]: See ReplicaState.Version.
+ // [2]: See Replica.executeWriteBatch, specifically how proposals with the + // Migrate request are handled downstream of raft. + // [3]: See PurgeOutdatedReplicas from the KVMigration service. + + { + // The migrations infrastructure makes use of internal fence + // versions when stepping through consecutive versions. It's + // instructive to walk through how we expect a version migration + // from v21.1 to v21.2 to take place, and how we behave in the + // presence of new v21.1 or v21.2 Nodes being added to the cluster. + // + // - All Nodes are running v21.1 + // - All Nodes are rolled into v21.2 binaries, but with active + // cluster version still as v21.1 + // - The first version bump will be into v21.2-1(fence), see the + // migration manager above for where that happens + // + // Then concurrently: + // + // - A new node is added to the cluster, but running binary v21.1 + // - We try bumping the cluster gates to v21.2-1(fence) + // + // If the v21.1 node manages to sneak in before the version bump, + // it's fine as the version bump is a no-op one (all fence versions + // are). Any subsequent bumps (including the "actual" one bumping to + // v21.2) will fail during the validation step where we'll first + // check to see that all Nodes are running v21.2 binaries. + // + // If the v21.1 node is only added after v21.2-1(fence) is active, + // it won't be able to actually join the cluster (it'll be prevented + // by the join RPC). + // + // All of which is to say that once we've seen the node list + // stabilize (as UntilClusterStable enforces), any new nodes that + // can join the cluster will run a release that supports the fence + // version, and by design also supports the actual version (which is + // the direct successor of the fence).
+ fenceVersion := migration.FenceVersionFor(ctx, clusterVersion) + req := &serverpb.BumpClusterVersionRequest{ClusterVersion: &fenceVersion} + op := fmt.Sprintf("bump-cluster-version=%s", req.ClusterVersion.PrettyPrint()) + if err := m.c.UntilClusterStable(ctx, func() error { + return m.c.ForEveryNode(ctx, op, func(ctx context.Context, client serverpb.MigrationClient) error { + _, err := client.BumpClusterVersion(ctx, req) + return err + }) + }); err != nil { + return err + } + } + { + // Now sanity check that we'll actually be able to perform the real + // cluster version bump, cluster-wide. + req := &serverpb.ValidateTargetClusterVersionRequest{ClusterVersion: &clusterVersion} + op := fmt.Sprintf("validate-cluster-version=%s", req.ClusterVersion.PrettyPrint()) + if err := m.c.UntilClusterStable(ctx, func() error { + return m.c.ForEveryNode(ctx, op, func(ctx context.Context, client serverpb.MigrationClient) error { + _, err := client.ValidateTargetClusterVersion(ctx, req) + return err + }) + }); err != nil { + return err + } + } + { + // Finally, bump the real version cluster-wide. + req := &serverpb.BumpClusterVersionRequest{ClusterVersion: &clusterVersion} + op := fmt.Sprintf("bump-cluster-version=%s", req.ClusterVersion.PrettyPrint()) + if err := m.c.UntilClusterStable(ctx, func() error { + return m.c.ForEveryNode(ctx, op, func(ctx context.Context, client serverpb.MigrationClient) error { + _, err := client.BumpClusterVersion(ctx, req) + return err + }) + }); err != nil { + return err + } + } + } + + return nil +} + +func (m *Manager) runMigration( + ctx context.Context, user security.SQLUsername, version clusterversion.ClusterVersion, +) error { + mig, exists := m.GetMigration(version) + if !exists { + return nil + } + // The migration which introduces the infrastructure for running other long + // running migrations in jobs. It needs to be special-cased and run without + // a job or leasing for bootstrapping purposes. 
Fortunately it has been + // designed to be idempotent and cheap. + // + // TODO(ajwerner): Remove in 21.2. + if version.Version == clusterversion.ByKey(clusterversion.LongRunningMigrations) { + return mig.(*migration.SQLMigration).Run(ctx, version, migration.SQLDeps{ + DB: m.c.DB(), + Codec: m.codec, + Settings: m.settings, + }) + } + alreadyCompleted, id, err := m.getOrCreateMigrationJob(ctx, user, version) + if alreadyCompleted || err != nil { + return err + } + return m.jr.Run(ctx, m.ie, []int64{id}) +} + +func (m *Manager) getOrCreateMigrationJob( + ctx context.Context, user security.SQLUsername, version clusterversion.ClusterVersion, +) (alreadyCompleted bool, jobID int64, _ error) { + if err := m.c.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + alreadyCompleted, err = migrationjob.CheckIfMigrationCompleted(ctx, txn, m.ie, version) + if alreadyCompleted || err != nil { + return err + } + var found bool + found, jobID, err = m.getRunningMigrationJob(ctx, txn, version) + if err != nil { + return err + } + if found { + return nil + } + var j *jobs.Job + j, err = m.jr.CreateJobWithTxn(ctx, migrationjob.NewRecord(version, user), txn) + if err != nil { + return err + } + jobID = *j.ID() + return nil + }); err != nil { + return false, 0, err + } + return alreadyCompleted, jobID, nil +} + +func (m *Manager) getRunningMigrationJob( + ctx context.Context, txn *kv.Txn, version clusterversion.ClusterVersion, +) (found bool, jobID int64, _ error) { + const query = ` +SELECT id, status + FROM ( + SELECT id, + status, + crdb_internal.pb_to_json( + 'cockroach.sql.jobs.jobspb.Payload', + payload, + false -- emit_defaults + ) AS pl + FROM system.jobs + WHERE status IN ` + jobs.NonTerminalStatusTupleString + ` + ) + WHERE pl->'migration'->'clusterVersion' = $1::JSON;` + jsonMsg, err := protoreflect.MessageToJSON(&version, false /* emitDefaults */) + if err != nil { + return false, 0, errors.Wrap(err, "failed to marshal version to JSON") + } + rows, err := 
m.ie.Query(ctx, "migration-manager-find-jobs", txn, query, jsonMsg.String()) + if err != nil { + return false, 0, err + } + parseRow := func(row tree.Datums) (id int64, status jobs.Status) { + return int64(*row[0].(*tree.DInt)), jobs.Status(*row[1].(*tree.DString)) + } + switch len(rows) { + case 0: + return false, 0, nil + case 1: + id, status := parseRow(rows[0]) + log.Infof(ctx, "found existing migration job %d for version %v in status %s, waiting", + id, &version, status) + return true, id, nil + default: + var buf redact.StringBuilder + buf.Printf("found multiple non-terminal jobs for version %s: [", redact.Safe(&version)) + for i, row := range rows { + if i > 0 { + buf.SafeString(", ") + } + id, status := parseRow(row) + buf.Printf("(%d, %s)", id, redact.Safe(status)) + } + log.Errorf(ctx, "%s", buf) + return false, 0, errors.AssertionFailedf("%s", buf) + } +} + +func (m *Manager) listBetween( + from clusterversion.ClusterVersion, to clusterversion.ClusterVersion, +) []clusterversion.ClusterVersion { + if m.knobs.ListBetweenOverride != nil { + return m.knobs.ListBetweenOverride(from, to) + } + return clusterversion.ListBetween(from, to) +} diff --git a/pkg/migration/migrationmanager/manager_external_test.go b/pkg/migration/migrationmanager/manager_external_test.go new file mode 100644 index 000000000000..7be8c63cb6d5 --- /dev/null +++ b/pkg/migration/migrationmanager/manager_external_test.go @@ -0,0 +1,467 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package migrationmanager_test + +import ( + "context" + gosql "database/sql" + "sync/atomic" + "testing" + "time" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" + "github.com/cockroachdb/cockroach/pkg/migration" + "github.com/cockroachdb/cockroach/pkg/migration/migrationmanager" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" + "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" + "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/tracing" + "github.com/cockroachdb/errors" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +// TestAlreadyRunningJobsAreHandledProperly is a relatively low-level test to +// ensure that the behavior to detect running jobs is sane. The test intercepts +// and blocks a migration that it first runs. It then duplicates the job to +// break the single-running job invariant. It then ensures that that invariant +// violation is detected. After that errant job is finished, it ensures that +// concurrent attempts to bump the cluster version detect the already running +// migration and wait. +func TestAlreadyRunningJobsAreHandledProperly(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + // We're going to be migrating from startCV to endCV. 
+ startCV := clusterversion.ClusterVersion{Version: roachpb.Version{Major: 41}} + endCV := clusterversion.ClusterVersion{Version: roachpb.Version{Major: 42}} + + ch := make(chan chan error) + + ctx := context.Background() + tc := testcluster.StartTestCluster(t, 2, base.TestClusterArgs{ + ReplicationMode: base.ReplicationManual, + ServerArgs: base.TestServerArgs{ + Settings: cluster.MakeTestingClusterSettingsWithVersions(endCV.Version, startCV.Version, false), + Knobs: base.TestingKnobs{ + Server: &server.TestingKnobs{ + BinaryVersionOverride: startCV.Version, + DisableAutomaticVersionUpgrade: 1, + }, + MigrationManager: &migrationmanager.TestingKnobs{ + ListBetweenOverride: func(from, to clusterversion.ClusterVersion) []clusterversion.ClusterVersion { + return []clusterversion.ClusterVersion{to} + }, + RegistryOverride: func(cv clusterversion.ClusterVersion) (migration.Migration, bool) { + if cv != endCV { + return nil, false + } + return migration.NewSQLMigration("test", cv, func( + ctx context.Context, version clusterversion.ClusterVersion, deps migration.SQLDeps, + ) error { + canResume := make(chan error) + ch <- canResume + return <-canResume + }), true + }, + }, + }, + }, + }) + defer tc.Stopper().Stop(ctx) + + upgrade1Err := make(chan error, 1) + go func() { + _, err := tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, endCV.String()) + upgrade1Err <- err + }() + unblock := <-ch + + // Inject a second job for the same migration and ensure that that causes + // an error. This is pretty gnarly. 
+ var secondID int64 + require.NoError(t, tc.ServerConn(0).QueryRow(` + INSERT + INTO system.jobs + ( + SELECT + unique_rowid(), + status, + created, + payload, + progress, + created_by_type, + created_by_id, + claim_session_id, + claim_instance_id + FROM system.jobs + WHERE ( + crdb_internal.pb_to_json( + 'cockroach.sql.jobs.jobspb.Payload', + payload + )->'migration' + ) IS NOT NULL + ) +RETURNING id;`).Scan(&secondID)) + + // Make sure that the second job gets run in a timely manner. + runErr := make(chan error) + go func() { + runErr <- tc.Server(0).JobRegistry().(*jobs.Registry). + Run( + ctx, + tc.Server(0).InternalExecutor().(sqlutil.InternalExecutor), + []int64{secondID}, + ) + }() + fakeJobBlockChan := <-ch + + // Ensure that we see the assertion error. + _, err := tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, endCV.String()) + require.Regexp(t, "found multiple non-terminal jobs for version", err) + + // Let the fake, erroneous job finish with an error. + fakeJobBlockChan <- errors.New("boom") + require.Regexp(t, "boom", <-runErr) + + // Launch a second migration which later we'll ensure does not kick off + // another job. We'll make sure this happens by polling the trace to see + // the log line indicating what we want. + recCtx, getRecording, cancel := tracing.ContextWithRecordingSpan(ctx, "test") + defer cancel() + upgrade2Err := make(chan error, 1) + go func() { + // Use an internal executor to get access to the trace as it happens. 
+ _, err := tc.Server(0).InternalExecutor().(sqlutil.InternalExecutor).Exec( + recCtx, "test", nil /* txn */, `SET CLUSTER SETTING version = $1`, endCV.String()) + upgrade2Err <- err + }() + + testutils.SucceedsSoon(t, func() error { + if tracing.FindMsgInRecording(getRecording(), "found existing migration job") > 0 { + return nil + } + return errors.Errorf("waiting for job to be discovered: %v", getRecording()) + }) + close(unblock) + require.NoError(t, <-upgrade1Err) + require.NoError(t, <-upgrade2Err) +} + +func TestMigrateUpdatesReplicaVersion(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + // We're going to be migrating from startCV to endCV. + startCV := clusterversion.ClusterVersion{Version: roachpb.Version{Major: 41}} + endCV := clusterversion.ClusterVersion{Version: roachpb.Version{Major: 42}} + + var desc roachpb.RangeDescriptor + ctx := context.Background() + tc := testcluster.StartTestCluster(t, 2, base.TestClusterArgs{ + ReplicationMode: base.ReplicationManual, + ServerArgs: base.TestServerArgs{ + Settings: cluster.MakeTestingClusterSettingsWithVersions(endCV.Version, startCV.Version, false), + Knobs: base.TestingKnobs{ + Server: &server.TestingKnobs{ + BinaryVersionOverride: startCV.Version, + DisableAutomaticVersionUpgrade: 1, + }, + MigrationManager: &migrationmanager.TestingKnobs{ + ListBetweenOverride: func(from, to clusterversion.ClusterVersion) []clusterversion.ClusterVersion { + return []clusterversion.ClusterVersion{from, to} + }, + RegistryOverride: func(cv clusterversion.ClusterVersion) (migration.Migration, bool) { + if cv != endCV { + return nil, false + } + return migration.NewKVMigration("test", cv, func( + ctx context.Context, version clusterversion.ClusterVersion, c migration.Cluster, + ) error { + return c.DB().Migrate(ctx, desc.StartKey, desc.EndKey, cv.Version) + }), true + }, + }, + }, + }, + }) + defer tc.Stopper().Stop(ctx) + // RegisterKVMigration the below raft migration. 
+ unregisterKVMigration := batcheval.TestingRegisterMigrationInterceptor(endCV.Version, func() {}) + defer unregisterKVMigration() + + // We'll take a specific range, still running at startCV, generate an + // outgoing snapshot and then suspend it temporarily. We'll then bump the + // cluster version on all the stores, as part of the migration process, and + // then resume the snapshot process. Seeing as how the snapshot was + // generated pre-version bump, off of a version of the range that hadn't + // observed the migration corresponding to the latest cluster version, we + // expect the store to reject it. + + key := tc.ScratchRange(t) + require.NoError(t, tc.WaitForSplitAndInitialization(key)) + var err error + desc, err = tc.LookupRange(key) + require.NoError(t, err) + rangeID := desc.RangeID + + // Enqueue the replica in the raftsnapshot queue. We use SucceedsSoon + // because it may take a bit for raft to figure out that we need to be + // generating a snapshot. + store := tc.GetFirstStoreFromServer(t, 0) + repl, err := store.GetReplica(rangeID) + require.NoError(t, err) + + if got := repl.Version(); got != startCV.Version { + t.Fatalf("got replica version %s, expected %s", got, startCV.Version) + } + + // Wait until all nodes are considered live. + nl := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness) + testutils.SucceedsSoon(t, func() error { + for _, s := range tc.Servers { + id := s.NodeID() + live, err := nl.IsLive(id) + if err != nil { + return err + } + if !live { + return errors.Newf("n%s not live yet", id) + } + } + return nil + }) + + // Kick off the migration process.
+ _, err = tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, endCV.String()) + require.NoError(t, err) + + if got := repl.Version(); got != endCV.Version { + t.Fatalf("got replica version %s, expected %s", got, endCV.Version) + } +} + +// TestConcurrentMigrationAttempts ensures that concurrent attempts to run +// migrations over a number of versions exhibits reasonable behavior. Namely, +// that each migration gets run one time and that migrations do not get run +// again. +func TestConcurrentMigrationAttempts(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + // We're going to be migrating from startKey to endKey. We end up needing + // to use real versions because the ListBetween uses the keys compiled into + // the clusterversion package. + const ( + startMajor = 42 + endMajor = 48 + ) + migrationRunCounts := make(map[clusterversion.ClusterVersion]int) + + // RegisterKVMigration the migrations to update the map with run counts. + // There should definitely not be any concurrency of execution, so the race + // detector should not fire. 
+ var versions []clusterversion.ClusterVersion + + for major := int32(startMajor); major <= endMajor; major++ { + versions = append(versions, clusterversion.ClusterVersion{ + Version: roachpb.Version{ + Major: major, + }, + }) + } + ctx := context.Background() + var active int32 // used to detect races + tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{ + ReplicationMode: base.ReplicationManual, + ServerArgs: base.TestServerArgs{ + Settings: cluster.MakeTestingClusterSettingsWithVersions( + versions[len(versions)-1].Version, + versions[0].Version, + false, + ), + Knobs: base.TestingKnobs{ + Server: &server.TestingKnobs{ + BinaryVersionOverride: versions[0].Version, + DisableAutomaticVersionUpgrade: 1, + }, + MigrationManager: &migrationmanager.TestingKnobs{ + ListBetweenOverride: func(from, to clusterversion.ClusterVersion) []clusterversion.ClusterVersion { + return versions + }, + RegistryOverride: func(cv clusterversion.ClusterVersion) (migration.Migration, bool) { + return migration.NewKVMigration("test", cv, func( + ctx context.Context, version clusterversion.ClusterVersion, c migration.Cluster, + ) error { + if atomic.AddInt32(&active, 1) != 1 { + t.Error("unexpected concurrency") + } + time.Sleep(time.Millisecond) + atomic.AddInt32(&active, -1) + migrationRunCounts[version]++ + return nil + }), true + }, + }, + }, + }, + }) + defer tc.Stopper().Stop(ctx) + + // Run N instances of the migration concurrently on different connections. + // They should all eventually succeed; some may internally experience a + // serializable restart but cockroach will handle that transparently. + // Afterwards we'll ensure that no migration was run more than once. 
+ N := 25 + if util.RaceEnabled { + N = 5 + } + db := tc.ServerConn(0) + db.SetMaxOpenConns(N) + conns := make([]*gosql.Conn, N) + for i := range conns { + var err error + conns[i], err = db.Conn(ctx) + require.NoError(t, err) + } + var g errgroup.Group + for i := 0; i < N; i++ { + conn := conns[i] + g.Go(func() error { + _, err := conn.ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + versions[len(versions)-1].String()) + return err + }) + } + require.Nil(t, g.Wait()) + for k, c := range migrationRunCounts { + require.Equalf(t, 1, c, "version: %v", k) + } + require.Len(t, migrationRunCounts, len(versions)) +} + +// TestPauseMigration ensures that migrations can indeed be paused and that +// concurrent attempts to perform a migration will block on the existing, +// paused job. +func TestPauseMigration(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + defer jobs.TestingSetAdoptAndCancelIntervals( + 10*time.Millisecond, 10*time.Millisecond, + )() + + // We're going to be migrating from startCV to endCV. 
+ startCV := clusterversion.ClusterVersion{Version: roachpb.Version{Major: 41}} + endCV := clusterversion.ClusterVersion{Version: roachpb.Version{Major: 42}} + + type migrationEvent struct { + unblock chan<- error + canceled <-chan struct{} + } + ch := make(chan migrationEvent) + ctx := context.Background() + tc := testcluster.StartTestCluster(t, 2, base.TestClusterArgs{ + ReplicationMode: base.ReplicationManual, + ServerArgs: base.TestServerArgs{ + Settings: cluster.MakeTestingClusterSettingsWithVersions(endCV.Version, startCV.Version, false), + Knobs: base.TestingKnobs{ + Server: &server.TestingKnobs{ + BinaryVersionOverride: startCV.Version, + DisableAutomaticVersionUpgrade: 1, + }, + MigrationManager: &migrationmanager.TestingKnobs{ + ListBetweenOverride: func(from, to clusterversion.ClusterVersion) []clusterversion.ClusterVersion { + return []clusterversion.ClusterVersion{to} + }, + RegistryOverride: func(cv clusterversion.ClusterVersion) (migration.Migration, bool) { + if cv != endCV { + return nil, false + } + return migration.NewSQLMigration("test", cv, func( + ctx context.Context, version clusterversion.ClusterVersion, deps migration.SQLDeps, + ) error { + canResume := make(chan error) + ch <- migrationEvent{ + unblock: canResume, + canceled: ctx.Done(), + } + select { + case <-ctx.Done(): + return ctx.Err() + case err := <-canResume: + return err + } + }), true + }, + }, + }, + }, + }) + defer tc.Stopper().Stop(ctx) + + sqlDB := tc.ServerConn(0) + upgrade1Err := make(chan error, 1) + go func() { + _, err := sqlDB.ExecContext(ctx, `SET CLUSTER SETTING version = $1`, endCV.String()) + upgrade1Err <- err + }() + ev := <-ch + + tdb := sqlutils.MakeSQLRunner(sqlDB) + var id int64 + tdb.QueryRow(t, ` +SELECT id + FROM system.jobs + WHERE ( + crdb_internal.pb_to_json( + 'cockroach.sql.jobs.jobspb.Payload', + payload + )->'migration' + ) IS NOT NULL;`). 
+ Scan(&id) + tdb.Exec(t, "PAUSE JOB $1", id) + + <-ev.canceled + // Kick off another upgrade + upgrade2Err := make(chan error, 1) + go func() { + _, err := sqlDB.ExecContext(ctx, `SET CLUSTER SETTING version = $1`, endCV.String()) + upgrade2Err <- err + }() + + // The upgrade should not be done. + select { + case err := <-upgrade1Err: + t.Fatalf("did not expect the first upgrade to finish: %v", err) + case err := <-upgrade2Err: + t.Fatalf("did not expect the second upgrade to finish: %v", err) + case <-ch: + t.Fatalf("did not expect the job to run again") + case <-time.After(10 * time.Millisecond): + } + tdb.Exec(t, "RESUME JOB $1", id) + ev = <-ch + close(ev.unblock) + require.NoError(t, <-upgrade1Err) + require.NoError(t, <-upgrade2Err) +} diff --git a/pkg/migration/migrationmanager/testing_knobs.go b/pkg/migration/migrationmanager/testing_knobs.go new file mode 100644 index 000000000000..669aec7b1123 --- /dev/null +++ b/pkg/migration/migrationmanager/testing_knobs.go @@ -0,0 +1,35 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package migrationmanager + +import ( + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/migration" +) + +// TestingKnobs are knobs to inject behavior into the migration manager which +// are useful for testing. +type TestingKnobs struct { + + // ListBetweenOverride injects an override for `clusterversion.ListBetween() + // in order to run migrations corresponding to versions which do not + // actually exist. 
+ ListBetweenOverride func(from, to clusterversion.ClusterVersion) []clusterversion.ClusterVersion + + // RegistryOverride is used to inject migrations for specific cluster versions. + RegistryOverride func(cv clusterversion.ClusterVersion) (migration.Migration, bool) +} + +// ModuleTestingKnobs makes TestingKnobs a base.ModuleTestingKnobs. +func (t *TestingKnobs) ModuleTestingKnobs() {} + +var _ base.ModuleTestingKnobs = (*TestingKnobs)(nil) diff --git a/pkg/migration/migrations.go b/pkg/migration/migrations.go deleted file mode 100644 index 25ea7af8c3a3..000000000000 --- a/pkg/migration/migrations.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2020 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package migration - -import ( - "bytes" - "context" - "fmt" - - "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server/serverpb" - "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/cockroachdb/logtags" -) - -// registry defines the global mapping between a cluster version and the -// associated migration. The migration is only executed after a cluster-wide -// bump of the corresponding version gate. 
-var registry = make(map[clusterversion.ClusterVersion]Migration) - -func init() { - register(clusterversion.TruncatedAndRangeAppliedStateMigration, truncatedStateMigration, - "use unreplicated TruncatedState and RangeAppliedState for all ranges") - register(clusterversion.PostTruncatedAndRangeAppliedStateMigration, postTruncatedStateMigration, - "purge all replicas using the replicated TruncatedState") -} - -// Migration defines a program to be executed once every node in the cluster is -// (a) running a specific binary version, and (b) has completed all prior -// migrations. -// -// Each migration is associated with a specific internal cluster version and is -// idempotent in nature. When setting the cluster version (via `SET CLUSTER -// SETTING version`), the manager process determines the set of migrations -// needed to bridge the gap between the current active cluster version, and the -// target one. See [1] for where that happens. -// -// To introduce a migration, start by adding version key to pkg/clusterversion -// and introducing a corresponding internal cluster version for it. See [2] for -// more details. Following that, define a Migration in this package and add it -// to the registry. Be sure to key it in with the new cluster version we just -// added. During cluster upgrades, once the operator is able to set a cluster -// version setting that's past the version that was introduced (typically the -// major release version the migration was introduced in), the manager will -// execute the defined migration before letting the upgrade finalize. -// -// If the migration requires below-Raft level changes ([3] is one example), -// you'll need to add a version switch and the relevant KV-level migration in -// [4]. See IterateRangeDescriptors and the Migrate KV request for more details. 
-// -// [1]: `(*Manager).Migrate` -// [2]: pkg/clusterversion/cockroach_versions.go -// [3]: truncatedStateMigration -// [4]: pkg/kv/kvserver/batch_eval/cmd_migrate.go -// -// TODO(irfansharif): [3] and [4] are currently referring to what was prototyped -// in #57445. Once that makes its way into master, this TODO can be removed. -type Migration struct { - cv clusterversion.ClusterVersion - fn migrationFn - desc string -} - -type migrationFn func(context.Context, *Helper) error - -// Run kickstarts the actual migration process. It's responsible for recording -// the ongoing status of the migration into a system table. -// -// TODO(irfansharif): Introduce a `system.migrations` table, and populate it here. -func (m *Migration) Run(ctx context.Context, h *Helper) (err error) { - ctx = logtags.AddTag(ctx, "migration", h.ClusterVersion()) - - if err := m.fn(ctx, h); err != nil { - return err - } - - return nil -} - -// defaultPageSize controls how many range descriptors are paged in by default -// when iterating through all ranges in a cluster during any given migration. We -// pulled this number out of thin air(-ish). Let's consider a cluster with 50k -// ranges, with each range taking ~200ms. We're being somewhat conservative with -// the duration, but in a wide-area cluster with large hops between the manager -// and the replicas, it could be true. Here's how long it'll take for various -// block sizes: -// -// page size of 1 ~ 2h 46m -// page size of 50 ~ 3m 20s -// page size of 200 ~ 50s -const defaultPageSize = 200 - -func truncatedStateMigration(ctx context.Context, h *Helper) error { - return h.UntilClusterStable(ctx, func() error { - var batchIdx, numMigratedRanges int - init := func() { batchIdx, numMigratedRanges = 1, 0 } - if err := h.IterateRangeDescriptors(ctx, defaultPageSize, init, func(descriptors ...roachpb.RangeDescriptor) error { - for _, desc := range descriptors { - // NB: This is a bit of a wart. 
We want to reach the first range, - // but we can't address the (local) StartKey. However, keys.LocalMax - // is on r1, so we'll just use that instead to target r1. - start, end := desc.StartKey, desc.EndKey - if bytes.Compare(desc.StartKey, keys.LocalMax) < 0 { - start, _ = keys.Addr(keys.LocalMax) - } - if err := h.DB().Migrate(ctx, start, end, h.ClusterVersion().Version); err != nil { - return err - } - } - - // TODO(irfansharif): Instead of logging this to the debug log, we - // should be leveraging our jobs infrastructure for observability. - // See #58183. - numMigratedRanges += len(descriptors) - log.Infof(ctx, "[batch %d/??] migrated %d ranges", batchIdx, numMigratedRanges) - batchIdx++ - - return nil - }); err != nil { - return err - } - - log.Infof(ctx, "[batch %d/%d] migrated %d ranges", batchIdx, batchIdx, numMigratedRanges) - - // Make sure that all stores have synced. Given we're a below-raft - // migrations, this ensures that the applied state is flushed to disk. - req := &serverpb.SyncAllEnginesRequest{} - return h.ForEveryNode(ctx, "sync-engines", func(ctx context.Context, client serverpb.MigrationClient) error { - _, err := client.SyncAllEngines(ctx, req) - return err - }) - }) -} - -func postTruncatedStateMigration(ctx context.Context, h *Helper) error { - // Purge all replicas that haven't been migrated to use the unreplicated - // truncated state and the range applied state. We're sure to also durably - // persist any changes made in the same closure. Doing so in separate - // UntilClusterStable closure would run the (small) risk that a node might - // have GC-ed older replicas, restarted without syncing (thus unapplying the - // GC), and flushing all engines after. 
- truncStateVersion := clusterversion.ByKey(clusterversion.TruncatedAndRangeAppliedStateMigration) - op := fmt.Sprintf("purge-outdated-replicas-and-sync=%s", truncStateVersion) - err := h.UntilClusterStable(ctx, func() error { - err := h.ForEveryNode(ctx, op, func(ctx context.Context, client serverpb.MigrationClient) error { - preq := &serverpb.PurgeOutdatedReplicasRequest{Version: &truncStateVersion} - _, err := client.PurgeOutdatedReplicas(ctx, preq) - if err != nil { - return err - } - - freq := &serverpb.SyncAllEnginesRequest{} - _, err = client.SyncAllEngines(ctx, freq) - return err - }) - return err - }) - - return err -} - -// TestingRegisterMigrationInterceptor is used in tests to register an -// interceptor for a version migration. -// -// TODO(irfansharif): This is a gross anti-pattern, we're letting tests mutate -// global state. This should instead be a testing knob that the migration -// manager checks when search for attached migrations. -func TestingRegisterMigrationInterceptor( - cv clusterversion.ClusterVersion, fn migrationFn, -) (unregister func()) { - registry[cv] = Migration{cv: cv, fn: fn} - return func() { delete(registry, cv) } -} diff --git a/pkg/migration/migrations/BUILD.bazel b/pkg/migration/migrations/BUILD.bazel new file mode 100644 index 000000000000..3c769384402e --- /dev/null +++ b/pkg/migration/migrations/BUILD.bazel @@ -0,0 +1,44 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "migrations", + srcs = [ + "migrations.go", + "migrations_table.go", + "truncated_state.go", + ], + importpath = "github.com/cockroachdb/cockroach/pkg/migration/migrations", + visibility = ["//visibility:public"], + deps = [ + "//pkg/clusterversion", + "//pkg/keys", + "//pkg/migration", + "//pkg/roachpb", + "//pkg/server/serverpb", + "//pkg/sql/catalog/systemschema", + "//pkg/sqlmigrations", + "//pkg/util/log", + ], +) + +go_test( + name = "migrations_test", + srcs = [ + "main_test.go", + 
"truncated_state_external_test.go", + ], + deps = [ + "//pkg/base", + "//pkg/clusterversion", + "//pkg/kv/kvserver", + "//pkg/kv/kvserver/stateloader", + "//pkg/security", + "//pkg/security/securitytest", + "//pkg/server", + "//pkg/testutils/serverutils", + "//pkg/testutils/testcluster", + "//pkg/util/leaktest", + "@com_github_cockroachdb_errors//:errors", + "@com_github_stretchr_testify//require", + ], +) diff --git a/pkg/migration/main_test.go b/pkg/migration/migrations/main_test.go similarity index 97% rename from pkg/migration/main_test.go rename to pkg/migration/migrations/main_test.go index cdc3f7742a5a..5a38c8b0af33 100644 --- a/pkg/migration/main_test.go +++ b/pkg/migration/migrations/main_test.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package migration_test +package migrations_test import ( "os" diff --git a/pkg/migration/migrations/migrations.go b/pkg/migration/migrations/migrations.go new file mode 100644 index 000000000000..ea172ca310b4 --- /dev/null +++ b/pkg/migration/migrations/migrations.go @@ -0,0 +1,62 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// Package migrations contains the implementation of migrations. It is imported +// by the server library. +// +// This package registers the migrations with the migration package. +package migrations + +import ( + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/migration" +) + +// GetMigration returns the migration corresponding to this version if +// one exists. 
+func GetMigration(key clusterversion.ClusterVersion) (migration.Migration, bool) { + m, ok := registry[key] + return m, ok +} + +// registry defines the global mapping between a cluster version and the +// associated migration. The migration is only executed after a cluster-wide +// bump of the corresponding version gate. +var registry = make(map[clusterversion.ClusterVersion]migration.Migration) + +var migrations = []migration.Migration{ + migration.NewSQLMigration( + "add the system.migrations table", + toCV(clusterversion.LongRunningMigrations), + migrationsTableMigration, + ), + migration.NewKVMigration( + "use unreplicated TruncatedState and RangeAppliedState for all ranges", + toCV(clusterversion.TruncatedAndRangeAppliedStateMigration), + truncatedStateMigration, + ), + migration.NewKVMigration( + "purge all replicas using the replicated TruncatedState", + toCV(clusterversion.PostTruncatedAndRangeAppliedStateMigration), + postTruncatedStateMigration, + ), +} + +func init() { + for _, m := range migrations { + registry[m.ClusterVersion()] = m + } +} + +func toCV(key clusterversion.Key) clusterversion.ClusterVersion { + return clusterversion.ClusterVersion{ + Version: clusterversion.ByKey(key), + } +} diff --git a/pkg/migration/migrations/migrations_table.go b/pkg/migration/migrations/migrations_table.go new file mode 100644 index 000000000000..f6592c55a6ba --- /dev/null +++ b/pkg/migration/migrations/migrations_table.go @@ -0,0 +1,28 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package migrations + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/migration" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" + "github.com/cockroachdb/cockroach/pkg/sqlmigrations" +) + +func migrationsTableMigration( + ctx context.Context, _ clusterversion.ClusterVersion, d migration.SQLDeps, +) error { + return sqlmigrations.CreateSystemTable( + ctx, d.DB, d.Codec, d.Settings, systemschema.MigrationsTable, + ) +} diff --git a/pkg/migration/migrations/truncated_state.go b/pkg/migration/migrations/truncated_state.go new file mode 100644 index 000000000000..3129643ea6b7 --- /dev/null +++ b/pkg/migration/migrations/truncated_state.go @@ -0,0 +1,94 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package migrations + +import ( + "bytes" + "context" + "fmt" + + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/migration" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "github.com/cockroachdb/cockroach/pkg/util/log" +) + +// defaultPageSize controls how many ranges are paged in by default when +// iterating through all ranges in a cluster during any given migration. We +// pulled this number out of thin air(-ish). Let's consider a cluster with 50k +// ranges, with each range taking ~200ms. We're being somewhat conservative with +// the duration, but in a wide-area cluster with large hops between the manager +// and the replicas, it could be true. 
Here's how long it'll take for various +// block sizes: +// +// page size of 1 ~ 2h 46m +// page size of 50 ~ 3m 20s +// page size of 200 ~ 50s +const defaultPageSize = 200 + +func truncatedStateMigration( + ctx context.Context, cv clusterversion.ClusterVersion, h migration.Cluster, +) error { + var batchIdx, numMigratedRanges int + init := func() { batchIdx, numMigratedRanges = 1, 0 } + if err := h.IterateRangeDescriptors(ctx, defaultPageSize, init, func(descriptors ...roachpb.RangeDescriptor) error { + for _, desc := range descriptors { + // NB: This is a bit of a wart. We want to reach the first range, + // but we can't address the (local) StartKey. However, keys.LocalMax + // is on r1, so we'll just use that instead to target r1. + start, end := desc.StartKey, desc.EndKey + if bytes.Compare(desc.StartKey, keys.LocalMax) < 0 { + start, _ = keys.Addr(keys.LocalMax) + } + if err := h.DB().Migrate(ctx, start, end, cv.Version); err != nil { + return err + } + } + + // TODO(irfansharif): Instead of logging this to the debug log, we + // should insert these into a `system.migrations` table for external + // observability. + numMigratedRanges += len(descriptors) + log.Infof(ctx, "[batch %d/??] migrated %d ranges", batchIdx, numMigratedRanges) + batchIdx++ + + return nil + }); err != nil { + return err + } + + log.Infof(ctx, "[batch %d/%d] migrated %d ranges", batchIdx, batchIdx, numMigratedRanges) + + // Make sure that all stores have synced. Given we're a below-raft + // migrations, this ensures that the applied state is flushed to disk. 
+ req := &serverpb.SyncAllEnginesRequest{} + op := "flush-stores" + return h.ForEveryNode(ctx, op, func(ctx context.Context, client serverpb.MigrationClient) error { + _, err := client.SyncAllEngines(ctx, req) + return err + }) +} + +func postTruncatedStateMigration( + ctx context.Context, cv clusterversion.ClusterVersion, h migration.Cluster, +) error { + // Purge all replicas that haven't been migrated to use the unreplicated + // truncated state and the range applied state. + truncStateVersion := clusterversion.ByKey(clusterversion.TruncatedAndRangeAppliedStateMigration) + req := &serverpb.PurgeOutdatedReplicasRequest{Version: &truncStateVersion} + op := fmt.Sprintf("purge-outdated-replicas=%s", req.Version) + return h.ForEveryNode(ctx, op, func(ctx context.Context, client serverpb.MigrationClient) error { + _, err := client.PurgeOutdatedReplicas(ctx, req) + return err + }) +} diff --git a/pkg/migration/migrations_test.go b/pkg/migration/migrations/truncated_state_external_test.go similarity index 58% rename from pkg/migration/migrations_test.go rename to pkg/migration/migrations/truncated_state_external_test.go index d099ed1376eb..d27943b86032 100644 --- a/pkg/migration/migrations_test.go +++ b/pkg/migration/migrations/truncated_state_external_test.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. 
-package migration_test +package migrations_test import ( "context" @@ -18,17 +18,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" - "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval" - "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/stateloader" - "github.com/cockroachdb/cockroach/pkg/migration" - "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" - "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" - "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) @@ -67,7 +60,7 @@ func TestTruncatedStateMigration(t *testing.T) { for i := 0; i < tc.NumServers(); i++ { err := tc.Server(i).GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { var err error - s.VisitReplicas(func(repl *kvserver.Replica) (wantMore bool) { + s.VisitReplicas(func(repl *kvserver.Replica) bool { err = f(repl) return err == nil }) @@ -140,86 +133,3 @@ func TestTruncatedStateMigration(t *testing.T) { }) } } - -func TestMigrateUpdatesReplicaVersion(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - - // We're going to be migrating from startCV to endCV. 
- startCV := clusterversion.ClusterVersion{Version: roachpb.Version{Major: 41}} - endCV := clusterversion.ClusterVersion{Version: roachpb.Version{Major: 42}} - - ctx := context.Background() - tc := testcluster.StartTestCluster(t, 2, base.TestClusterArgs{ - ReplicationMode: base.ReplicationManual, - ServerArgs: base.TestServerArgs{ - Settings: cluster.MakeTestingClusterSettingsWithVersions(endCV.Version, startCV.Version, false), - Knobs: base.TestingKnobs{ - Server: &server.TestingKnobs{ - BinaryVersionOverride: startCV.Version, - DisableAutomaticVersionUpgrade: 1, - }, - }, - }, - }) - defer tc.Stopper().Stop(ctx) - - // We'll take a specific range, still running at startCV, generate an - // outgoing snapshot and then suspend it temporarily. We'll then bump the - // cluster version on all the stores, as part of the migration process, and - // then resume the snapshot process. Seeing as how the snapshot was - // generated pre-version bump, off of a version of the range that hadn't - // observed the migration corresponding to the latest cluster version, we - // expect the store to reject it. - - key := tc.ScratchRange(t) - require.NoError(t, tc.WaitForSplitAndInitialization(key)) - desc, err := tc.LookupRange(key) - require.NoError(t, err) - rangeID := desc.RangeID - - // Enqueue the replica in the raftsnapshot queue. We use SucceedsSoon - // because it may take a bit for raft to figure out that we need to be - // generating a snapshot. - store := tc.GetFirstStoreFromServer(t, 0) - repl, err := store.GetReplica(rangeID) - require.NoError(t, err) - - if got := repl.Version(); got != startCV.Version { - t.Fatalf("got replica version %s, expected %s", got, startCV.Version) - } - - // Register the below raft migration. - unregisterKVMigration := batcheval.TestingRegisterMigrationInterceptor(endCV.Version, func() {}) - defer unregisterKVMigration() - - // Register the top-level migration. 
- unregister := migration.TestingRegisterMigrationInterceptor(endCV, func(ctx context.Context, h *migration.Helper) error { - return h.DB().Migrate(ctx, desc.StartKey, desc.EndKey, h.ClusterVersion().Version) - }) - defer unregister() - - // Wait until all nodes have are considered live. - nl := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness) - testutils.SucceedsSoon(t, func() error { - for _, s := range tc.Servers { - id := s.NodeID() - live, err := nl.IsLive(id) - if err != nil { - return err - } - if !live { - return errors.Newf("n%s not live yet", id) - } - } - return nil - }) - - // Kick off the migration process. - _, err = tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, endCV.String()) - require.NoError(t, err) - - if got := repl.Version(); got != endCV.Version { - t.Fatalf("got replica version %s, expected %s", got, endCV.Version) - } -} diff --git a/pkg/migration/nodelivenesstest/BUILD.bazel b/pkg/migration/nodelivenesstest/BUILD.bazel new file mode 100644 index 000000000000..027901493e8c --- /dev/null +++ b/pkg/migration/nodelivenesstest/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "nodelivenesstest", + srcs = ["test_node_liveness.go"], + importpath = "github.com/cockroachdb/cockroach/pkg/migration/nodelivenesstest", + visibility = ["//visibility:public"], + deps = [ + "//pkg/kv/kvserver/liveness/livenesspb", + "//pkg/roachpb", + ], +) diff --git a/pkg/migration/nodelivenesstest/test_node_liveness.go b/pkg/migration/nodelivenesstest/test_node_liveness.go new file mode 100644 index 000000000000..e930ad1d83e1 --- /dev/null +++ b/pkg/migration/nodelivenesstest/test_node_liveness.go @@ -0,0 +1,96 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// Package nodelivenesstest provides a mock implementation of NodeLiveness +// to facilitate testing of migration infrastructure. +package nodelivenesstest + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" + "github.com/cockroachdb/cockroach/pkg/roachpb" +) + +// NodeLiveness is a testing-only implementation of the NodeLiveness. It +// lets tests mock out restarting, killing, decommissioning and adding Nodes to +// the cluster. +type NodeLiveness struct { + ls []livenesspb.Liveness + dead map[roachpb.NodeID]struct{} +} + +// New constructs a new NodeLiveness with the specified number of nodes. +func New(numNodes int) *NodeLiveness { + nl := &NodeLiveness{ + ls: make([]livenesspb.Liveness, numNodes), + dead: make(map[roachpb.NodeID]struct{}), + } + for i := 0; i < numNodes; i++ { + nl.ls[i] = livenesspb.Liveness{ + NodeID: roachpb.NodeID(i + 1), Epoch: 1, + Membership: livenesspb.MembershipStatus_ACTIVE, + } + } + return nl +} + +// GetLivenessesFromKV implements the NodeLiveness interface. +func (t *NodeLiveness) GetLivenessesFromKV(context.Context) ([]livenesspb.Liveness, error) { + return t.ls, nil +} + +// IsLive implements the NodeLiveness interface. +func (t *NodeLiveness) IsLive(id roachpb.NodeID) (bool, error) { + _, dead := t.dead[id] + return !dead, nil +} + +// Decommission marks the specified node as decommissioned. +func (t *NodeLiveness) Decommission(id roachpb.NodeID) { + for i := range t.ls { + if t.ls[i].NodeID == id { + t.ls[i].Membership = livenesspb.MembershipStatus_DECOMMISSIONED + break + } + } +} + +// AddNewNode adds a new node with an ID greater than all other nodes. 
+func (t *NodeLiveness) AddNewNode() { + t.AddNode(roachpb.NodeID(len(t.ls) + 1)) +} + +// AddNode adds a new node with the specified ID. +func (t *NodeLiveness) AddNode(id roachpb.NodeID) { + t.ls = append(t.ls, livenesspb.Liveness{ + NodeID: id, + Epoch: 1, + Membership: livenesspb.MembershipStatus_ACTIVE, + }) +} + +// DownNode marks a given node as down. +func (t *NodeLiveness) DownNode(id roachpb.NodeID) { + t.dead[id] = struct{}{} +} + +// RestartNode increments the epoch for a given node and marks it as +// alive. +func (t *NodeLiveness) RestartNode(id roachpb.NodeID) { + for i := range t.ls { + if t.ls[i].NodeID == id { + t.ls[i].Epoch++ + break + } + } + + delete(t.dead, id) +} diff --git a/pkg/migration/sql_migration.go b/pkg/migration/sql_migration.go new file mode 100644 index 000000000000..6d2b5fe2e4ab --- /dev/null +++ b/pkg/migration/sql_migration.go @@ -0,0 +1,61 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package migration + +import ( + "context" + "fmt" + + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/logtags" +) + +// SQLDeps are the dependencies of migrations which perform actions at the +// SQL layer. +type SQLDeps struct { + DB *kv.DB + Codec keys.SQLCodec + Settings *cluster.Settings +} + +// SQLMigrationFn is used to perform sql-level migrations. It may be run from +// any tenant. 
+type SQLMigrationFn func(context.Context, clusterversion.ClusterVersion, SQLDeps) error + +// SQLMigration is an implementation of Migration for SQL-level migrations. +type SQLMigration struct { + migration + fn SQLMigrationFn +} + +// NewSQLMigration constructs a SQLMigration. +func NewSQLMigration( + description string, cv clusterversion.ClusterVersion, fn SQLMigrationFn, +) *SQLMigration { + return &SQLMigration{ + migration: migration{ + description: description, + cv: cv, + }, + fn: fn, + } +} + +// Run kickstarts the actual migration process for SQL-level migrations. +func (m *SQLMigration) Run( + ctx context.Context, cv clusterversion.ClusterVersion, d SQLDeps, +) (err error) { + ctx = logtags.AddTag(ctx, fmt.Sprintf("migration=%s", cv), nil) + return m.fn(ctx, cv, d) +} diff --git a/pkg/migration/util.go b/pkg/migration/util.go deleted file mode 100644 index 0814456b8c1e..000000000000 --- a/pkg/migration/util.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2020 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package migration - -import ( - "context" - - "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/cockroachdb/redact" -) - -// node captures the relevant bits of each node as it pertains to the migration -// infrastructure. -type node struct { - id roachpb.NodeID - epoch int64 -} - -// nodes is a collection of node objects. -type nodes []node - -// identical returns whether or not two lists of nodes are identical as sets, -// and if not, what changed (in terms of cluster membership operations and epoch -// changes). 
The textual diffs are only to be used for logging purposes. -func (ns nodes) identical(other nodes) (ok bool, _ []redact.RedactableString) { - a, b := ns, other - - type ent struct { - node node - count int - epochChanged bool - } - m := map[roachpb.NodeID]ent{} - for _, node := range a { - m[node.id] = ent{count: 1, node: node, epochChanged: false} - } - for _, node := range b { - e, ok := m[node.id] - e.count-- - if ok && e.node.epoch != node.epoch { - e.epochChanged = true - } - m[node.id] = e - } - - var diffs []redact.RedactableString - for id, e := range m { - if e.epochChanged { - diffs = append(diffs, redact.Sprintf("n%d's epoch changed", id)) - } - if e.count > 0 { - diffs = append(diffs, redact.Sprintf("n%d was decommissioned", id)) - } - if e.count < 0 { - diffs = append(diffs, redact.Sprintf("n%d joined the cluster", id)) - } - } - - return len(diffs) == 0, diffs -} - -func (ns nodes) String() string { - return redact.StringWithoutMarkers(ns) -} - -// SafeFormat implements redact.SafeFormatter. -func (ns nodes) SafeFormat(s redact.SafePrinter, _ rune) { - s.SafeString("n{") - if len(ns) > 0 { - s.Printf("%d", ns[0].id) - for _, node := range ns[1:] { - s.Printf(",%d", node.id) - } - } - s.SafeString("}") -} - -// fenceVersionFor constructs the appropriate "fence version" for the given -// cluster version. Fence versions allow the migrations infrastructure to safely -// step through consecutive cluster versions in the presence of nodes (running -// any binary version) being added to the cluster. See the migration manager -// above for intended usage. -// -// Fence versions (and the migrations infrastructure entirely) were introduced -// in the 21.1 release cycle. In the same release cycle, we introduced the -// invariant that new user-defined versions (users being crdb engineers) must -// always have even-numbered Internal versions, thus reserving the odd numbers -// to slot in fence versions for each cluster version. 
See top-level -// documentation in pkg/clusterversion for more details. -func fenceVersionFor( - ctx context.Context, cv clusterversion.ClusterVersion, -) clusterversion.ClusterVersion { - if (cv.Internal % 2) != 0 { - log.Fatalf(ctx, "only even numbered internal versions allowed, found %s", cv.Version) - } - - // We'll pick the odd internal version preceding the cluster version, - // slotting ourselves right before it. - fenceCV := cv - fenceCV.Internal-- - return fenceCV -} - -// register is a short hand to register a given migration within the global -// registry. -func register(key clusterversion.Key, fn migrationFn, desc string) { - cv := clusterversion.ClusterVersion{Version: clusterversion.ByKey(key)} - if _, ok := registry[cv]; ok { - log.Fatalf(context.Background(), "doubly registering migration for %s", cv) - } - registry[cv] = Migration{cv: cv, fn: fn, desc: desc} -} diff --git a/pkg/server/BUILD.bazel b/pkg/server/BUILD.bazel index 8a780fb7fe3f..4f9325f307d3 100644 --- a/pkg/server/BUILD.bazel +++ b/pkg/server/BUILD.bazel @@ -75,7 +75,8 @@ go_library( "//pkg/kv/kvserver/protectedts/ptprovider", "//pkg/kv/kvserver/protectedts/ptreconcile", "//pkg/kv/kvserver/reports", - "//pkg/migration", + "//pkg/migration/migrationcluster", + "//pkg/migration/migrationmanager", "//pkg/roachpb", "//pkg/rpc", "//pkg/rpc/nodedialer", diff --git a/pkg/server/server_sql.go b/pkg/server/server_sql.go index c34b77b4889d..b7f083b15d3f 100644 --- a/pkg/server/server_sql.go +++ b/pkg/server/server_sql.go @@ -32,7 +32,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvtenant" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" - "github.com/cockroachdb/cockroach/pkg/migration" + "github.com/cockroachdb/cockroach/pkg/migration/migrationcluster" + "github.com/cockroachdb/cockroach/pkg/migration/migrationmanager" "github.com/cockroachdb/cockroach/pkg/roachpb" 
"github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/rpc/nodedialer" @@ -625,15 +626,21 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { // We only need to attach a version upgrade hook if we're the system // tenant. Regular tenants are disallowed from changing cluster // versions. - migrationMgr := migration.NewManager( - cfg.nodeDialer, - nodeLiveness, - cfg.circularInternalExecutor, - cfg.db, + // + // TODO(ajwerner): Allow tenants to set their cluster version and to + // perform sql migrations through the migration infrastructure. + // See #48436. + c := migrationcluster.New(migrationcluster.ClusterConfig{ + NodeLiveness: nodeLiveness, + Dialer: cfg.nodeDialer, + DB: cfg.db, + }) + knobs, _ := cfg.TestingKnobs.MigrationManager.(*migrationmanager.TestingKnobs) + migrationMgr := migrationmanager.NewManager( + c, cfg.circularInternalExecutor, jobRegistry, codec, cfg.Settings, knobs, ) - execCfg.VersionUpgradeHook = func(ctx context.Context, from, to clusterversion.ClusterVersion) error { - return migrationMgr.Migrate(ctx, from, to) - } + execCfg.MigrationJobDeps = migrationMgr + execCfg.VersionUpgradeHook = migrationMgr.Migrate } temporaryObjectCleaner := sql.NewTemporaryObjectCleaner( diff --git a/pkg/sql/BUILD.bazel b/pkg/sql/BUILD.bazel index 6393275322eb..6428fb8a8e47 100644 --- a/pkg/sql/BUILD.bazel +++ b/pkg/sql/BUILD.bazel @@ -250,6 +250,7 @@ go_library( "//pkg/kv/kvserver", "//pkg/kv/kvserver/liveness/livenesspb", "//pkg/kv/kvserver/protectedts", + "//pkg/migration", "//pkg/roachpb", "//pkg/rpc", "//pkg/rpc/nodedialer", diff --git a/pkg/sql/catalog/bootstrap/metadata.go b/pkg/sql/catalog/bootstrap/metadata.go index 7818715e16f8..bdbb1439f62a 100644 --- a/pkg/sql/catalog/bootstrap/metadata.go +++ b/pkg/sql/catalog/bootstrap/metadata.go @@ -342,6 +342,7 @@ func addSystemDescriptorsToSchema(target *MetadataSchema) { target.AddDescriptor(keys.SystemDatabaseID, systemschema.ScheduledJobsTable) 
target.AddDescriptor(keys.SystemDatabaseID, systemschema.SqllivenessTable) + target.AddDescriptor(keys.SystemDatabaseID, systemschema.MigrationsTable) } // addSplitIDs adds a split point for each of the PseudoTableIDs to the supplied diff --git a/pkg/sql/catalog/descpb/privilege.go b/pkg/sql/catalog/descpb/privilege.go index f59406574fca..c128985acbf2 100644 --- a/pkg/sql/catalog/descpb/privilege.go +++ b/pkg/sql/catalog/descpb/privilege.go @@ -435,6 +435,7 @@ var SystemAllowedPrivileges = map[ID]privilege.List{ keys.StatementDiagnosticsTableID: privilege.ReadWriteData, keys.ScheduledJobsTableID: privilege.ReadWriteData, keys.SqllivenessID: privilege.ReadWriteData, + keys.MigrationsID: privilege.ReadWriteData, } // SetOwner sets the owner of the privilege descriptor to the provided string. diff --git a/pkg/sql/catalog/systemschema/system.go b/pkg/sql/catalog/systemschema/system.go index 5adbb79e2b60..bb452a734f3c 100644 --- a/pkg/sql/catalog/systemschema/system.go +++ b/pkg/sql/catalog/systemschema/system.go @@ -345,6 +345,17 @@ CREATE TABLE system.sqlliveness ( expiration DECIMAL NOT NULL, FAMILY fam0_session_id_expiration (session_id, expiration) )` + + MigrationsTableSchema = ` +CREATE TABLE system.migrations ( + major INT8 NOT NULL, + minor INT8 NOT NULL, + patch INT8 NOT NULL, + internal INT8 NOT NULL, + completed_at TIMESTAMPTZ NOT NULL, + FAMILY "primary" (major, minor, patch, internal, completed_at), + PRIMARY KEY (major, minor, patch, internal) +)` ) func pk(name string) descpb.IndexDescriptor { @@ -1690,6 +1701,54 @@ var ( FormatVersion: descpb.InterleavedFormatVersion, NextMutationID: 1, }) + + // MigrationsTable is the descriptor for the migrations table. It stores facts + // about the completion state of long-running migrations. It is used to + // prevent migrations from running again after they have been completed. 
+ MigrationsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + Name: "migrations", + ID: keys.MigrationsID, + ParentID: keys.SystemDatabaseID, + UnexposedParentSchemaID: keys.PublicSchemaID, + Version: 1, + Columns: []descpb.ColumnDescriptor{ + {Name: "major", ID: 1, Type: types.Int, Nullable: false}, + {Name: "minor", ID: 2, Type: types.Int, Nullable: false}, + {Name: "patch", ID: 3, Type: types.Int, Nullable: false}, + {Name: "internal", ID: 4, Type: types.Int, Nullable: false}, + {Name: "completed_at", ID: 5, Type: types.TimestampTZ, Nullable: false}, + }, + NextColumnID: 6, + Families: []descpb.ColumnFamilyDescriptor{ + { + Name: "primary", + ID: 0, + ColumnNames: []string{"major", "minor", "patch", "internal", "completed_at"}, + ColumnIDs: []descpb.ColumnID{1, 2, 3, 4, 5}, + DefaultColumnID: 5, + }, + }, + NextFamilyID: 1, + PrimaryIndex: descpb.IndexDescriptor{ + Name: tabledesc.PrimaryKeyIndexName, + ID: 1, + Unique: true, + ColumnNames: []string{"major", "minor", "patch", "internal"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{ + descpb.IndexDescriptor_ASC, + descpb.IndexDescriptor_ASC, + descpb.IndexDescriptor_ASC, + descpb.IndexDescriptor_ASC, + }, + ColumnIDs: []descpb.ColumnID{1, 2, 3, 4}, + Version: descpb.EmptyArraysInInvertedIndexesVersion, + }, + NextIndexID: 2, + Privileges: descpb.NewCustomSuperuserPrivilegeDescriptor( + descpb.SystemAllowedPrivileges[keys.JobsTableID], security.NodeUserName()), + FormatVersion: descpb.InterleavedFormatVersion, + NextMutationID: 1, + }) ) // newCommentPrivilegeDescriptor returns a privilege descriptor for comment table diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index c787e3d9044e..1bf924c0ffbf 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -36,6 +36,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangecache" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" + 
"github.com/cockroachdb/cockroach/pkg/migration" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/security" @@ -796,8 +797,16 @@ type ExecutorConfig struct { // VersionUpgradeHook is called after validating a `SET CLUSTER SETTING // version` but before executing it. It can carry out arbitrary migrations - // that allow us to eventually remove legacy code. - VersionUpgradeHook func(ctx context.Context, from, to clusterversion.ClusterVersion) error + // that allow us to eventually remove legacy code. It will only be populated + // on the system tenant. + // + // TODO(tbg,irfansharif,ajwerner): Hook up for secondary tenants. + VersionUpgradeHook func(ctx context.Context, user security.SQLUsername, from, to clusterversion.ClusterVersion) error + + // MigrationJobDeps is used to drive migrations. + // + // TODO(tbg,irfansharif,ajwerner): Hook up for secondary tenants. + MigrationJobDeps migration.JobDeps // IndexBackfiller is used to backfill indexes. It is another rather circular // object which mostly just holds on to an ExecConfig. 
diff --git a/pkg/sql/job_exec_context.go b/pkg/sql/job_exec_context.go index 31f87c07ca61..e8a94efd6377 100644 --- a/pkg/sql/job_exec_context.go +++ b/pkg/sql/job_exec_context.go @@ -11,6 +11,7 @@ package sql import ( + "github.com/cockroachdb/cockroach/pkg/migration" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -45,6 +46,9 @@ func (e *plannerJobExecContext) ExecCfg() *ExecutorConfig { return e.p.Ex func (e *plannerJobExecContext) DistSQLPlanner() *DistSQLPlanner { return e.p.DistSQLPlanner() } func (e *plannerJobExecContext) LeaseMgr() *lease.Manager { return e.p.LeaseMgr() } func (e *plannerJobExecContext) User() security.SQLUsername { return e.p.User() } +func (e *plannerJobExecContext) MigrationJobDeps() migration.JobDeps { + return e.p.MigrationJobDeps() +} // JobExecContext provides the execution environment for a job. It is what is // passed to the Resume/OnFailOrCancel/OnPauseRequested methods of a jobs's @@ -63,4 +67,5 @@ type JobExecContext interface { DistSQLPlanner() *DistSQLPlanner LeaseMgr() *lease.Manager User() security.SQLUsername + MigrationJobDeps() migration.JobDeps } diff --git a/pkg/sql/logictest/BUILD.bazel b/pkg/sql/logictest/BUILD.bazel index 9b409b8cb724..7feb995d2aca 100644 --- a/pkg/sql/logictest/BUILD.bazel +++ b/pkg/sql/logictest/BUILD.bazel @@ -8,7 +8,9 @@ go_library( deps = [ "//pkg/base", "//pkg/build", + "//pkg/clusterversion", "//pkg/kv/kvserver", + "//pkg/migration/migrationmanager", "//pkg/roachpb", "//pkg/security", "//pkg/server", diff --git a/pkg/sql/logictest/logic.go b/pkg/sql/logictest/logic.go index b6cfdada86e8..4a0ef22f08fe 100644 --- a/pkg/sql/logictest/logic.go +++ b/pkg/sql/logictest/logic.go @@ -39,7 +39,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/build" + "github.com/cockroachdb/cockroach/pkg/clusterversion" 
"github.com/cockroachdb/cockroach/pkg/kv/kvserver" + "github.com/cockroachdb/cockroach/pkg/migration/migrationmanager" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server" @@ -1396,6 +1398,23 @@ func (t *logicTest) newCluster(serverArgs TestServerArgs) { binaryMinSupportedVersion, false, /* initializeVersion */ ) + + // If we're injecting fake versions, hook up logic to simulate the end + // version existing. + from := clusterversion.ClusterVersion{Version: cfg.bootstrapVersion} + to := clusterversion.ClusterVersion{Version: cfg.binaryVersion} + if len(clusterversion.ListBetween(from, to)) == 0 { + mm, ok := nodeParams.Knobs.MigrationManager.(*migrationmanager.TestingKnobs) + if !ok { + mm = &migrationmanager.TestingKnobs{} + nodeParams.Knobs.MigrationManager = mm + } + mm.ListBetweenOverride = func( + from, to clusterversion.ClusterVersion, + ) []clusterversion.ClusterVersion { + return []clusterversion.ClusterVersion{to} + } + } } paramsPerNode[i] = nodeParams } diff --git a/pkg/sql/logictest/testdata/logic_test/grant_table b/pkg/sql/logictest/testdata/logic_test/grant_table index 60d5ecd953cb..444fb8883c55 100644 --- a/pkg/sql/logictest/testdata/logic_test/grant_table +++ b/pkg/sql/logictest/testdata/logic_test/grant_table @@ -481,302 +481,312 @@ SELECT * FROM [SHOW GRANTS] WHERE schema_name NOT IN ('crdb_internal', 'pg_catalog', 'information_schema') ---- database_name schema_name relation_name grantee privilege_type -a pg_extension NULL readwrite ALL -a pg_extension NULL admin ALL -a pg_extension NULL root ALL -a pg_extension geography_columns public SELECT -a pg_extension geometry_columns public SELECT -a pg_extension spatial_ref_sys public SELECT -a public NULL readwrite ALL -a public NULL admin ALL -a public NULL root ALL -defaultdb pg_extension NULL admin ALL -defaultdb pg_extension NULL root ALL +system pg_extension geography_columns public SELECT +system pg_extension 
geometry_columns public SELECT +system pg_extension spatial_ref_sys public SELECT defaultdb pg_extension geography_columns public SELECT defaultdb pg_extension geometry_columns public SELECT defaultdb pg_extension spatial_ref_sys public SELECT -defaultdb public NULL admin ALL -defaultdb public NULL root ALL -postgres pg_extension NULL root ALL -postgres pg_extension NULL admin ALL postgres pg_extension geography_columns public SELECT postgres pg_extension geometry_columns public SELECT postgres pg_extension spatial_ref_sys public SELECT -postgres public NULL root ALL -postgres public NULL admin ALL -system pg_extension NULL admin GRANT -system pg_extension NULL root USAGE -system pg_extension NULL root GRANT -system pg_extension NULL admin USAGE -system pg_extension geography_columns public SELECT -system pg_extension geometry_columns public SELECT -system pg_extension spatial_ref_sys public SELECT -system public NULL admin GRANT -system public NULL admin USAGE -system public NULL root USAGE -system public NULL root GRANT -system public comments root UPDATE -system public comments admin GRANT -system public comments root SELECT -system public comments root DELETE -system public comments admin UPDATE -system public comments admin INSERT -system public comments admin DELETE -system public comments root INSERT -system public comments admin SELECT -system public comments public SELECT -system public comments root GRANT +test pg_extension geography_columns public SELECT +test pg_extension geometry_columns public SELECT +test pg_extension spatial_ref_sys public SELECT +a pg_extension geography_columns public SELECT +a pg_extension geometry_columns public SELECT +a pg_extension spatial_ref_sys public SELECT +system public namespace admin GRANT +system public namespace admin SELECT +system public namespace root GRANT +system public namespace root SELECT system public descriptor admin GRANT -system public descriptor root SELECT -system public descriptor root GRANT system 
public descriptor admin SELECT -system public eventlog admin GRANT -system public eventlog root UPDATE -system public eventlog root GRANT +system public descriptor root GRANT +system public descriptor root SELECT +system public users admin DELETE +system public users admin GRANT +system public users admin INSERT +system public users admin SELECT +system public users admin UPDATE +system public users root DELETE +system public users root GRANT +system public users root INSERT +system public users root SELECT +system public users root UPDATE +system public zones admin DELETE +system public zones admin GRANT +system public zones admin INSERT +system public zones admin SELECT +system public zones admin UPDATE +system public zones root DELETE +system public zones root GRANT +system public zones root INSERT +system public zones root SELECT +system public zones root UPDATE +system public settings admin DELETE +system public settings admin GRANT +system public settings admin INSERT +system public settings admin SELECT +system public settings admin UPDATE +system public settings root DELETE +system public settings root GRANT +system public settings root INSERT +system public settings root SELECT +system public settings root UPDATE +system public tenants admin GRANT +system public tenants admin SELECT +system public tenants root GRANT +system public tenants root SELECT +system public lease admin DELETE +system public lease admin GRANT +system public lease admin INSERT +system public lease admin SELECT +system public lease admin UPDATE +system public lease root DELETE +system public lease root GRANT +system public lease root INSERT +system public lease root SELECT +system public lease root UPDATE system public eventlog admin DELETE +system public eventlog admin GRANT +system public eventlog admin INSERT +system public eventlog admin SELECT system public eventlog admin UPDATE system public eventlog root DELETE -system public eventlog admin SELECT -system public eventlog admin 
INSERT -system public eventlog root SELECT +system public eventlog root GRANT system public eventlog root INSERT -system public jobs admin INSERT -system public jobs admin UPDATE -system public jobs root SELECT -system public jobs root INSERT -system public jobs root GRANT -system public jobs admin SELECT -system public jobs root DELETE -system public jobs admin DELETE -system public jobs admin GRANT -system public jobs root UPDATE -system public lease admin INSERT -system public lease admin GRANT -system public lease root UPDATE -system public lease root SELECT -system public lease root GRANT -system public lease root DELETE -system public lease admin UPDATE -system public lease admin SELECT -system public lease admin DELETE -system public lease root INSERT -system public locations root UPDATE -system public locations root SELECT -system public locations root INSERT -system public locations root GRANT -system public locations root DELETE -system public locations admin UPDATE -system public locations admin SELECT -system public locations admin INSERT -system public locations admin GRANT -system public locations admin DELETE -system public namespace admin SELECT -system public namespace root GRANT -system public namespace admin GRANT -system public namespace root SELECT -system public namespace2 root SELECT -system public namespace2 root GRANT -system public namespace2 admin GRANT -system public namespace2 admin SELECT -system public protected_ts_meta admin GRANT -system public protected_ts_meta admin SELECT -system public protected_ts_meta root SELECT -system public protected_ts_meta root GRANT -system public protected_ts_records root GRANT -system public protected_ts_records admin GRANT -system public protected_ts_records admin SELECT -system public protected_ts_records root SELECT -system public rangelog admin UPDATE -system public rangelog admin SELECT +system public eventlog root SELECT +system public eventlog root UPDATE +system public rangelog admin DELETE 
system public rangelog admin GRANT +system public rangelog admin INSERT +system public rangelog admin SELECT +system public rangelog admin UPDATE system public rangelog root DELETE system public rangelog root GRANT system public rangelog root INSERT system public rangelog root SELECT -system public rangelog admin DELETE -system public rangelog admin INSERT system public rangelog root UPDATE -system public replication_constraint_stats root SELECT -system public replication_constraint_stats root INSERT -system public replication_constraint_stats root GRANT -system public replication_constraint_stats root DELETE -system public replication_constraint_stats admin UPDATE -system public replication_constraint_stats admin SELECT -system public replication_constraint_stats admin INSERT -system public replication_constraint_stats root UPDATE +system public ui admin DELETE +system public ui admin GRANT +system public ui admin INSERT +system public ui admin SELECT +system public ui admin UPDATE +system public ui root DELETE +system public ui root GRANT +system public ui root INSERT +system public ui root SELECT +system public ui root UPDATE +system public jobs admin DELETE +system public jobs admin GRANT +system public jobs admin INSERT +system public jobs admin SELECT +system public jobs admin UPDATE +system public jobs root DELETE +system public jobs root GRANT +system public jobs root INSERT +system public jobs root SELECT +system public jobs root UPDATE +system public web_sessions admin DELETE +system public web_sessions admin GRANT +system public web_sessions admin INSERT +system public web_sessions admin SELECT +system public web_sessions admin UPDATE +system public web_sessions root DELETE +system public web_sessions root GRANT +system public web_sessions root INSERT +system public web_sessions root SELECT +system public web_sessions root UPDATE +system public table_statistics admin DELETE +system public table_statistics admin GRANT +system public table_statistics admin 
INSERT +system public table_statistics admin SELECT +system public table_statistics admin UPDATE +system public table_statistics root DELETE +system public table_statistics root GRANT +system public table_statistics root INSERT +system public table_statistics root SELECT +system public table_statistics root UPDATE +system public locations admin DELETE +system public locations admin GRANT +system public locations admin INSERT +system public locations admin SELECT +system public locations admin UPDATE +system public locations root DELETE +system public locations root GRANT +system public locations root INSERT +system public locations root SELECT +system public locations root UPDATE +system public role_members admin DELETE +system public role_members admin GRANT +system public role_members admin INSERT +system public role_members admin SELECT +system public role_members admin UPDATE +system public role_members root DELETE +system public role_members root GRANT +system public role_members root INSERT +system public role_members root SELECT +system public role_members root UPDATE +system public comments admin DELETE +system public comments admin GRANT +system public comments admin INSERT +system public comments admin SELECT +system public comments admin UPDATE +system public comments public SELECT +system public comments root DELETE +system public comments root GRANT +system public comments root INSERT +system public comments root SELECT +system public comments root UPDATE system public replication_constraint_stats admin DELETE system public replication_constraint_stats admin GRANT -system public replication_critical_localities root GRANT +system public replication_constraint_stats admin INSERT +system public replication_constraint_stats admin SELECT +system public replication_constraint_stats admin UPDATE +system public replication_constraint_stats root DELETE +system public replication_constraint_stats root GRANT +system public replication_constraint_stats root INSERT 
+system public replication_constraint_stats root SELECT +system public replication_constraint_stats root UPDATE system public replication_critical_localities admin DELETE -system public replication_critical_localities root INSERT -system public replication_critical_localities root SELECT system public replication_critical_localities admin GRANT -system public replication_critical_localities root UPDATE system public replication_critical_localities admin INSERT system public replication_critical_localities admin SELECT system public replication_critical_localities admin UPDATE system public replication_critical_localities root DELETE -system public replication_stats admin SELECT -system public replication_stats root SELECT +system public replication_critical_localities root GRANT +system public replication_critical_localities root INSERT +system public replication_critical_localities root SELECT +system public replication_critical_localities root UPDATE system public replication_stats admin DELETE system public replication_stats admin GRANT -system public replication_stats root DELETE +system public replication_stats admin INSERT +system public replication_stats admin SELECT system public replication_stats admin UPDATE +system public replication_stats root DELETE system public replication_stats root GRANT -system public replication_stats root UPDATE -system public replication_stats admin INSERT system public replication_stats root INSERT +system public replication_stats root SELECT +system public replication_stats root UPDATE system public reports_meta admin DELETE -system public reports_meta root INSERT -system public reports_meta root SELECT -system public reports_meta admin UPDATE -system public reports_meta admin SELECT -system public reports_meta root UPDATE -system public reports_meta admin INSERT system public reports_meta admin GRANT +system public reports_meta admin INSERT +system public reports_meta admin SELECT +system public reports_meta admin UPDATE 
system public reports_meta root DELETE system public reports_meta root GRANT -system public role_members admin DELETE -system public role_members admin GRANT -system public role_members admin INSERT -system public role_members admin SELECT -system public role_members root DELETE -system public role_members root GRANT -system public role_members root INSERT -system public role_members root UPDATE -system public role_members root SELECT -system public role_members admin UPDATE -system public role_options admin UPDATE -system public role_options root GRANT -system public role_options root DELETE +system public reports_meta root INSERT +system public reports_meta root SELECT +system public reports_meta root UPDATE +system public namespace2 admin GRANT +system public namespace2 admin SELECT +system public namespace2 root GRANT +system public namespace2 root SELECT +system public protected_ts_meta admin GRANT +system public protected_ts_meta admin SELECT +system public protected_ts_meta root GRANT +system public protected_ts_meta root SELECT +system public protected_ts_records admin GRANT +system public protected_ts_records admin SELECT +system public protected_ts_records root GRANT +system public protected_ts_records root SELECT system public role_options admin DELETE system public role_options admin GRANT system public role_options admin INSERT +system public role_options admin SELECT +system public role_options admin UPDATE +system public role_options root DELETE +system public role_options root GRANT system public role_options root INSERT system public role_options root SELECT system public role_options root UPDATE -system public role_options admin SELECT -system public scheduled_jobs admin INSERT -system public scheduled_jobs root UPDATE +system public statement_bundle_chunks admin DELETE +system public statement_bundle_chunks admin GRANT +system public statement_bundle_chunks admin INSERT +system public statement_bundle_chunks admin SELECT +system public 
statement_bundle_chunks admin UPDATE +system public statement_bundle_chunks root DELETE +system public statement_bundle_chunks root GRANT +system public statement_bundle_chunks root INSERT +system public statement_bundle_chunks root SELECT +system public statement_bundle_chunks root UPDATE +system public statement_diagnostics_requests admin DELETE +system public statement_diagnostics_requests admin GRANT +system public statement_diagnostics_requests admin INSERT +system public statement_diagnostics_requests admin SELECT +system public statement_diagnostics_requests admin UPDATE +system public statement_diagnostics_requests root DELETE +system public statement_diagnostics_requests root GRANT +system public statement_diagnostics_requests root INSERT +system public statement_diagnostics_requests root SELECT +system public statement_diagnostics_requests root UPDATE +system public statement_diagnostics admin DELETE +system public statement_diagnostics admin GRANT +system public statement_diagnostics admin INSERT +system public statement_diagnostics admin SELECT +system public statement_diagnostics admin UPDATE +system public statement_diagnostics root DELETE +system public statement_diagnostics root GRANT +system public statement_diagnostics root INSERT +system public statement_diagnostics root SELECT +system public statement_diagnostics root UPDATE system public scheduled_jobs admin DELETE -system public scheduled_jobs root SELECT system public scheduled_jobs admin GRANT -system public scheduled_jobs root INSERT -system public scheduled_jobs root GRANT -system public scheduled_jobs root DELETE +system public scheduled_jobs admin INSERT system public scheduled_jobs admin SELECT system public scheduled_jobs admin UPDATE -system public settings root DELETE -system public settings root INSERT -system public settings root SELECT -system public settings admin DELETE -system public settings admin GRANT -system public settings admin INSERT -system public settings admin SELECT 
-system public settings root UPDATE -system public settings admin UPDATE -system public settings root GRANT +system public scheduled_jobs root DELETE +system public scheduled_jobs root GRANT +system public scheduled_jobs root INSERT +system public scheduled_jobs root SELECT +system public scheduled_jobs root UPDATE +system public sqlliveness admin DELETE +system public sqlliveness admin GRANT +system public sqlliveness admin INSERT system public sqlliveness admin SELECT system public sqlliveness admin UPDATE -system public sqlliveness admin GRANT system public sqlliveness root DELETE system public sqlliveness root GRANT -system public sqlliveness admin DELETE +system public sqlliveness root INSERT system public sqlliveness root SELECT system public sqlliveness root UPDATE -system public sqlliveness root INSERT -system public sqlliveness admin INSERT -system public statement_bundle_chunks root SELECT -system public statement_bundle_chunks root INSERT -system public statement_bundle_chunks root DELETE -system public statement_bundle_chunks admin UPDATE -system public statement_bundle_chunks admin SELECT -system public statement_bundle_chunks root GRANT -system public statement_bundle_chunks admin INSERT -system public statement_bundle_chunks root UPDATE -system public statement_bundle_chunks admin DELETE -system public statement_bundle_chunks admin GRANT -system public statement_diagnostics root SELECT -system public statement_diagnostics root GRANT -system public statement_diagnostics root DELETE -system public statement_diagnostics admin UPDATE -system public statement_diagnostics admin SELECT -system public statement_diagnostics admin INSERT -system public statement_diagnostics admin GRANT -system public statement_diagnostics admin DELETE -system public statement_diagnostics root UPDATE -system public statement_diagnostics root INSERT -system public statement_diagnostics_requests admin SELECT -system public statement_diagnostics_requests root UPDATE -system public 
statement_diagnostics_requests root SELECT -system public statement_diagnostics_requests root INSERT -system public statement_diagnostics_requests root GRANT -system public statement_diagnostics_requests admin DELETE -system public statement_diagnostics_requests admin UPDATE -system public statement_diagnostics_requests admin INSERT -system public statement_diagnostics_requests root DELETE -system public statement_diagnostics_requests admin GRANT -system public table_statistics root SELECT -system public table_statistics admin UPDATE -system public table_statistics admin DELETE -system public table_statistics admin INSERT -system public table_statistics admin SELECT -system public table_statistics root INSERT -system public table_statistics root GRANT -system public table_statistics root UPDATE -system public table_statistics admin GRANT -system public table_statistics root DELETE -system public tenants root SELECT -system public tenants root GRANT -system public tenants admin SELECT -system public tenants admin GRANT -system public ui admin GRANT -system public ui root SELECT -system public ui root UPDATE -system public ui admin DELETE -system public ui admin UPDATE -system public ui admin SELECT -system public ui root INSERT -system public ui root DELETE -system public ui root GRANT -system public ui admin INSERT -system public users admin SELECT -system public users admin GRANT -system public users admin DELETE -system public users admin UPDATE -system public users root DELETE -system public users root GRANT -system public users root INSERT -system public users admin INSERT -system public users root UPDATE -system public users root SELECT -system public web_sessions admin SELECT -system public web_sessions root UPDATE -system public web_sessions root SELECT -system public web_sessions admin INSERT -system public web_sessions root GRANT -system public web_sessions root DELETE -system public web_sessions admin GRANT -system public web_sessions admin DELETE -system 
public web_sessions root INSERT -system public web_sessions admin UPDATE -system public zones root UPDATE -system public zones admin UPDATE -system public zones admin INSERT -system public zones admin SELECT -system public zones root DELETE -system public zones root GRANT -system public zones admin GRANT -system public zones root SELECT -system public zones admin DELETE -system public zones root INSERT +system public migrations admin DELETE +system public migrations admin GRANT +system public migrations admin INSERT +system public migrations admin SELECT +system public migrations admin UPDATE +system public migrations root DELETE +system public migrations root GRANT +system public migrations root INSERT +system public migrations root SELECT +system public migrations root UPDATE +a pg_extension NULL admin ALL +a pg_extension NULL readwrite ALL +a pg_extension NULL root ALL +a public NULL admin ALL +a public NULL readwrite ALL +a public NULL root ALL +defaultdb pg_extension NULL admin ALL +defaultdb pg_extension NULL root ALL +defaultdb public NULL admin ALL +defaultdb public NULL root ALL +postgres pg_extension NULL admin ALL +postgres pg_extension NULL root ALL +postgres public NULL admin ALL +postgres public NULL root ALL +system pg_extension NULL admin GRANT +system pg_extension NULL admin USAGE +system pg_extension NULL root GRANT +system pg_extension NULL root USAGE +system public NULL admin GRANT +system public NULL admin USAGE +system public NULL root GRANT +system public NULL root USAGE test pg_extension NULL admin ALL test pg_extension NULL root ALL -test pg_extension geography_columns public SELECT -test pg_extension geometry_columns public SELECT -test pg_extension spatial_ref_sys public SELECT -test public NULL root ALL test public NULL admin ALL +test public NULL root ALL query TTTTT colnames SHOW GRANTS FOR root @@ -1139,6 +1149,11 @@ system public locations root GRA system public locations root INSERT system public locations root SELECT system public 
locations root UPDATE +system public migrations root DELETE +system public migrations root GRANT +system public migrations root INSERT +system public migrations root SELECT +system public migrations root UPDATE system public namespace root GRANT system public namespace root SELECT system public namespace2 root GRANT diff --git a/pkg/sql/logictest/testdata/logic_test/information_schema b/pkg/sql/logictest/testdata/logic_test/information_schema index 24c8e1bdb9ea..e74bf5812088 100755 --- a/pkg/sql/logictest/testdata/logic_test/information_schema +++ b/pkg/sql/logictest/testdata/logic_test/information_schema @@ -774,6 +774,7 @@ system public statement_diagnostics_requests BASE T system public statement_diagnostics BASE TABLE YES 1 system public scheduled_jobs BASE TABLE YES 1 system public sqlliveness BASE TABLE YES 1 +system public migrations BASE TABLE YES 1 statement ok ALTER TABLE other_db.xyz ADD COLUMN j INT @@ -859,6 +860,12 @@ system public 630200280_21_2_not_null system system public 630200280_21_3_not_null system public locations CHECK NO NO system public 630200280_21_4_not_null system public locations CHECK NO NO system public primary system public locations PRIMARY KEY NO NO +system public 630200280_40_1_not_null system public migrations CHECK NO NO +system public 630200280_40_2_not_null system public migrations CHECK NO NO +system public 630200280_40_3_not_null system public migrations CHECK NO NO +system public 630200280_40_4_not_null system public migrations CHECK NO NO +system public 630200280_40_5_not_null system public migrations CHECK NO NO +system public primary system public migrations PRIMARY KEY NO NO system public 630200280_2_1_not_null system public namespace CHECK NO NO system public 630200280_2_2_not_null system public namespace CHECK NO NO system public primary system public namespace PRIMARY KEY NO NO @@ -1089,6 +1096,11 @@ system public 630200280_37_9_not_null executor_type I system public 630200280_39_1_not_null session_id IS NOT NULL 
system public 630200280_39_2_not_null expiration IS NOT NULL system public 630200280_3_1_not_null id IS NOT NULL +system public 630200280_40_1_not_null major IS NOT NULL +system public 630200280_40_2_not_null minor IS NOT NULL +system public 630200280_40_3_not_null patch IS NOT NULL +system public 630200280_40_4_not_null internal IS NOT NULL +system public 630200280_40_5_not_null completed_at IS NOT NULL system public 630200280_4_1_not_null username IS NOT NULL system public 630200280_4_3_not_null isRole IS NOT NULL system public 630200280_5_1_not_null id IS NOT NULL @@ -1118,6 +1130,10 @@ system public lease nodeID sy system public lease version system public primary system public locations localityKey system public primary system public locations localityValue system public primary +system public migrations internal system public primary +system public migrations major system public primary +system public migrations minor system public primary +system public migrations patch system public primary system public namespace name system public primary system public namespace parentID system public primary system public namespace2 name system public primary @@ -1282,6 +1298,11 @@ system public locations latitude system public locations localityKey 1 system public locations localityValue 2 system public locations longitude 4 +system public migrations completed_at 5 +system public migrations internal 4 +system public migrations major 1 +system public migrations minor 2 +system public migrations patch 3 system public namespace id 3 system public namespace name 2 system public namespace parentID 1 @@ -2041,6 +2062,16 @@ NULL root system public locations NULL root system public locations INSERT NULL NO NULL root system public locations SELECT NULL YES NULL root system public locations UPDATE NULL NO +NULL admin system public migrations DELETE NULL NO +NULL admin system public migrations GRANT NULL NO +NULL admin system public migrations INSERT NULL NO +NULL admin system 
public migrations SELECT NULL YES +NULL admin system public migrations UPDATE NULL NO +NULL root system public migrations DELETE NULL NO +NULL root system public migrations GRANT NULL NO +NULL root system public migrations INSERT NULL NO +NULL root system public migrations SELECT NULL YES +NULL root system public migrations UPDATE NULL NO NULL admin system public namespace GRANT NULL NO NULL admin system public namespace SELECT NULL YES NULL root system public namespace GRANT NULL NO @@ -2627,6 +2658,16 @@ NULL root system public sqlliveness NULL root system public sqlliveness INSERT NULL NO NULL root system public sqlliveness SELECT NULL YES NULL root system public sqlliveness UPDATE NULL NO +NULL admin system public migrations DELETE NULL NO +NULL admin system public migrations GRANT NULL NO +NULL admin system public migrations INSERT NULL NO +NULL admin system public migrations SELECT NULL YES +NULL admin system public migrations UPDATE NULL NO +NULL root system public migrations DELETE NULL NO +NULL root system public migrations GRANT NULL NO +NULL root system public migrations INSERT NULL NO +NULL root system public migrations SELECT NULL YES +NULL root system public migrations UPDATE NULL NO statement ok CREATE TABLE other_db.xyz (i INT) diff --git a/pkg/sql/logictest/testdata/logic_test/pg_catalog b/pkg/sql/logictest/testdata/logic_test/pg_catalog index 7c082b4fa5c9..589e8b673bcc 100644 --- a/pkg/sql/logictest/testdata/logic_test/pg_catalog +++ b/pkg/sql/logictest/testdata/logic_test/pg_catalog @@ -796,6 +796,7 @@ indexrelid indrelid indnatts indisunique indisprimary indisexclusion indim 2008917578 37 1 false false false false false true false false true false 5 0 0 2 NULL NULL 2101708905 5 1 true true false true false true false false true false 1 0 0 2 NULL NULL 2148104569 21 2 true true false true false true false false true false 1 2 3403232968 3403232968 0 0 2 2 NULL NULL +2268653844 40 4 true true false true false true false false true false 1 2 3 4 0 
0 0 0 0 0 0 0 2 2 2 2 NULL NULL 2361445172 8 1 true true false true false true false false true false 1 0 0 2 NULL NULL 2407840836 24 3 true true false true false true false false true false 1 2 3 0 0 0 0 0 0 2 2 2 NULL NULL 2621181440 15 2 false false false false false true false false true false 2 3 3403232968 0 0 0 2 2 NULL NULL @@ -852,6 +853,10 @@ indexrelid operator_argument_type_oid operator_argument_position 2101708905 0 1 2148104569 0 1 2148104569 0 2 +2268653844 0 1 +2268653844 0 2 +2268653844 0 3 +2268653844 0 4 2361445172 0 1 2407840836 0 1 2407840836 0 2 diff --git a/pkg/sql/logictest/testdata/logic_test/ranges b/pkg/sql/logictest/testdata/logic_test/ranges index 039d165ce29e..c0151119790a 100644 --- a/pkg/sql/logictest/testdata/logic_test/ranges +++ b/pkg/sql/logictest/testdata/logic_test/ranges @@ -309,7 +309,8 @@ start_key start_pretty end_key [172] /Table/36 [173] /Table/37 system statement_diagnostics · {1} 1 [173] /Table/37 [174] /Table/38 system scheduled_jobs · {1} 1 [174] /Table/38 [175] /Table/39 · · · {1} 1 -[175] /Table/39 [189 137] /Table/53/1 system sqlliveness · {1} 1 +[175] /Table/39 [176] /Table/40 system sqlliveness · {1} 1 +[176] /Table/40 [189 137] /Table/53/1 system migrations · {1} 1 [189 137] /Table/53/1 [189 137 137] /Table/53/1/1 test t · {1} 1 [189 137 137] /Table/53/1/1 [189 137 141 137] /Table/53/1/5/1 test t · {3,4} 3 [189 137 141 137] /Table/53/1/5/1 [189 137 141 138] /Table/53/1/5/2 test t · {1,2,3} 1 @@ -367,7 +368,8 @@ start_key start_pretty end_key [172] /Table/36 [173] /Table/37 system statement_diagnostics · {1} 1 [173] /Table/37 [174] /Table/38 system scheduled_jobs · {1} 1 [174] /Table/38 [175] /Table/39 · · · {1} 1 -[175] /Table/39 [189 137] /Table/53/1 system sqlliveness · {1} 1 +[175] /Table/39 [176] /Table/40 system sqlliveness · {1} 1 +[176] /Table/40 [189 137] /Table/53/1 system migrations · {1} 1 [189 137] /Table/53/1 [189 137 137] /Table/53/1/1 test t · {1} 1 [189 137 137] /Table/53/1/1 [189 137 141 137] 
/Table/53/1/5/1 test t · {3,4} 3 [189 137 141 137] /Table/53/1/5/1 [189 137 141 138] /Table/53/1/5/2 test t · {1,2,3} 1 diff --git a/pkg/sql/logictest/testdata/logic_test/show_source b/pkg/sql/logictest/testdata/logic_test/show_source index 7bc7be1535f1..2b4ca2dde365 100644 --- a/pkg/sql/logictest/testdata/logic_test/show_source +++ b/pkg/sql/logictest/testdata/logic_test/show_source @@ -196,68 +196,70 @@ SELECT * FROM [SHOW TABLES FROM system] ---- schema_name table_name type owner estimated_row_count locality public namespace table NULL NULL NULL -public descriptor table NULL NULL NULL -public users table NULL NULL NULL -public zones table NULL NULL NULL -public settings table NULL NULL NULL -public tenants table NULL NULL NULL -public lease table NULL NULL NULL -public eventlog table NULL NULL NULL -public rangelog table NULL NULL NULL -public ui table NULL NULL NULL -public jobs table NULL NULL NULL -public web_sessions table NULL NULL NULL -public table_statistics table NULL NULL NULL -public locations table NULL NULL NULL -public role_members table NULL NULL NULL -public comments table NULL NULL NULL -public replication_constraint_stats table NULL NULL NULL -public replication_critical_localities table NULL NULL NULL -public replication_stats table NULL NULL NULL -public reports_meta table NULL NULL NULL -public namespace2 table NULL NULL NULL -public protected_ts_meta table NULL NULL NULL -public protected_ts_records table NULL NULL NULL -public role_options table NULL NULL NULL -public statement_bundle_chunks table NULL NULL NULL -public statement_diagnostics_requests table NULL NULL NULL -public statement_diagnostics table NULL NULL NULL -public scheduled_jobs table NULL NULL NULL +public migrations table NULL NULL NULL public sqlliveness table NULL NULL NULL +public scheduled_jobs table NULL NULL NULL +public statement_diagnostics table NULL NULL NULL +public statement_diagnostics_requests table NULL NULL NULL +public statement_bundle_chunks table NULL 
NULL NULL +public role_options table NULL NULL NULL +public protected_ts_records table NULL NULL NULL +public protected_ts_meta table NULL NULL NULL +public namespace2 table NULL NULL NULL +public reports_meta table NULL NULL NULL +public replication_stats table NULL NULL NULL +public replication_critical_localities table NULL NULL NULL +public replication_constraint_stats table NULL NULL NULL +public comments table NULL NULL NULL +public role_members table NULL NULL NULL +public locations table NULL NULL NULL +public table_statistics table NULL NULL NULL +public web_sessions table NULL NULL NULL +public jobs table NULL NULL NULL +public ui table NULL NULL NULL +public rangelog table NULL NULL NULL +public eventlog table NULL NULL NULL +public lease table NULL NULL NULL +public tenants table NULL NULL NULL +public settings table NULL NULL NULL +public zones table NULL NULL NULL +public users table NULL NULL NULL +public descriptor table NULL NULL NULL query TTTTTTT colnames,rowsort SELECT * FROM [SHOW TABLES FROM system WITH COMMENT] ---- schema_name table_name type owner estimated_row_count locality comment public namespace table NULL NULL NULL · -public descriptor table NULL NULL NULL · -public users table NULL NULL NULL · -public zones table NULL NULL NULL · -public settings table NULL NULL NULL · -public tenants table NULL NULL NULL · -public lease table NULL NULL NULL · -public eventlog table NULL NULL NULL · -public rangelog table NULL NULL NULL · -public ui table NULL NULL NULL · -public jobs table NULL NULL NULL · -public web_sessions table NULL NULL NULL · -public table_statistics table NULL NULL NULL · -public locations table NULL NULL NULL · -public role_members table NULL NULL NULL · -public comments table NULL NULL NULL · -public replication_constraint_stats table NULL NULL NULL · -public replication_critical_localities table NULL NULL NULL · -public replication_stats table NULL NULL NULL · -public reports_meta table NULL NULL NULL · -public namespace2 
table NULL NULL NULL · -public protected_ts_meta table NULL NULL NULL · -public protected_ts_records table NULL NULL NULL · -public role_options table NULL NULL NULL · -public statement_bundle_chunks table NULL NULL NULL · -public statement_diagnostics_requests table NULL NULL NULL · -public statement_diagnostics table NULL NULL NULL · -public scheduled_jobs table NULL NULL NULL · +public migrations table NULL NULL NULL · public sqlliveness table NULL NULL NULL · +public scheduled_jobs table NULL NULL NULL · +public statement_diagnostics table NULL NULL NULL · +public statement_diagnostics_requests table NULL NULL NULL · +public statement_bundle_chunks table NULL NULL NULL · +public role_options table NULL NULL NULL · +public protected_ts_records table NULL NULL NULL · +public protected_ts_meta table NULL NULL NULL · +public namespace2 table NULL NULL NULL · +public reports_meta table NULL NULL NULL · +public replication_stats table NULL NULL NULL · +public replication_critical_localities table NULL NULL NULL · +public replication_constraint_stats table NULL NULL NULL · +public comments table NULL NULL NULL · +public role_members table NULL NULL NULL · +public locations table NULL NULL NULL · +public table_statistics table NULL NULL NULL · +public web_sessions table NULL NULL NULL · +public jobs table NULL NULL NULL · +public ui table NULL NULL NULL · +public rangelog table NULL NULL NULL · +public eventlog table NULL NULL NULL · +public lease table NULL NULL NULL · +public tenants table NULL NULL NULL · +public settings table NULL NULL NULL · +public zones table NULL NULL NULL · +public users table NULL NULL NULL · +public descriptor table NULL NULL NULL · query ITTT colnames SELECT node_id, user_name, application_name, active_queries diff --git a/pkg/sql/logictest/testdata/logic_test/system b/pkg/sql/logictest/testdata/logic_test/system index 036fae47b2d1..4c50cf625b0b 100644 --- a/pkg/sql/logictest/testdata/logic_test/system +++ 
b/pkg/sql/logictest/testdata/logic_test/system @@ -16,6 +16,7 @@ public eventlog table NULL NULL NULL public jobs table NULL NULL NULL public lease table NULL NULL NULL public locations table NULL NULL NULL +public migrations table NULL NULL NULL public namespace table NULL NULL NULL public namespace2 table NULL NULL NULL public protected_ts_meta table NULL NULL NULL @@ -73,6 +74,7 @@ SELECT id FROM system.descriptor 36 37 39 +40 50 51 52 @@ -246,6 +248,16 @@ system public locations root GRANT system public locations root INSERT system public locations root SELECT system public locations root UPDATE +system public migrations admin DELETE +system public migrations admin GRANT +system public migrations admin INSERT +system public migrations admin SELECT +system public migrations admin UPDATE +system public migrations root DELETE +system public migrations root GRANT +system public migrations root INSERT +system public migrations root SELECT +system public migrations root UPDATE system public namespace admin GRANT system public namespace admin SELECT system public namespace root GRANT diff --git a/pkg/sql/logictest/testdata/logic_test/system_namespace b/pkg/sql/logictest/testdata/logic_test/system_namespace index ce32ec8c320b..6291b9c053f3 100644 --- a/pkg/sql/logictest/testdata/logic_test/system_namespace +++ b/pkg/sql/logictest/testdata/logic_test/system_namespace @@ -18,6 +18,7 @@ SELECT * FROM system.namespace 1 29 jobs 15 1 29 lease 11 1 29 locations 21 +1 29 migrations 40 1 29 namespace 2 1 29 namespace2 30 1 29 protected_ts_meta 31 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/autocommit_nonmetamorphic b/pkg/sql/opt/exec/execbuilder/testdata/autocommit_nonmetamorphic index f52fa55cec6c..7191149f250a 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/autocommit_nonmetamorphic +++ b/pkg/sql/opt/exec/execbuilder/testdata/autocommit_nonmetamorphic @@ -41,7 +41,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT 
LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 # Multi-row insert should auto-commit. query B @@ -62,7 +62,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 2 CPut, 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 2 CPut, 1 EndTxn to (n1,s1):1 # No auto-commit inside a transaction. statement ok @@ -86,7 +86,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 2 CPut to (n1,s1):1 +dist sender send r36: sending batch 2 CPut to (n1,s1):1 statement ok ROLLBACK @@ -110,7 +110,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 2 CPut, 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 2 CPut, 1 EndTxn to (n1,s1):1 # TODO(radu): allow non-side-effecting projections. query B @@ -132,8 +132,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND operation NOT LIKE '%async%' ---- -dist sender send r35: sending batch 2 CPut to (n1,s1):1 -dist sender send r35: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 2 CPut to (n1,s1):1 +dist sender send r36: sending batch 1 EndTxn to (n1,s1):1 # Insert with RETURNING statement with side-effects should not auto-commit. # In this case division can (in principle) error out. 
@@ -156,8 +156,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND operation NOT LIKE '%async%' ---- -dist sender send r35: sending batch 2 CPut to (n1,s1):1 -dist sender send r35: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 2 CPut to (n1,s1):1 +dist sender send r36: sending batch 1 EndTxn to (n1,s1):1 # Another way to test the scenario above: generate an error and ensure that the # mutation was not committed. @@ -192,7 +192,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 1 Put, 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 Put, 1 EndTxn to (n1,s1):1 # Multi-row upsert should auto-commit. query B @@ -213,7 +213,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 2 Put, 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 2 Put, 1 EndTxn to (n1,s1):1 # No auto-commit inside a transaction. statement ok @@ -237,7 +237,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 2 Put to (n1,s1):1 +dist sender send r36: sending batch 2 Put to (n1,s1):1 statement ok ROLLBACK @@ -261,7 +261,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 2 Put, 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 2 Put, 1 EndTxn to (n1,s1):1 # TODO(radu): allow non-side-effecting projections. 
query B @@ -283,8 +283,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND operation NOT LIKE '%async%' ---- -dist sender send r35: sending batch 2 Put to (n1,s1):1 -dist sender send r35: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 2 Put to (n1,s1):1 +dist sender send r36: sending batch 1 EndTxn to (n1,s1):1 # Upsert with RETURNING statement with side-effects should not auto-commit. # In this case division can (in principle) error out. @@ -307,8 +307,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND operation NOT LIKE '%async%' ---- -dist sender send r35: sending batch 2 Put to (n1,s1):1 -dist sender send r35: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 2 Put to (n1,s1):1 +dist sender send r36: sending batch 1 EndTxn to (n1,s1):1 # Another way to test the scenario above: generate an error and ensure that the # mutation was not committed. @@ -343,8 +343,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 1 Scan to (n1,s1):1 -dist sender send r35: sending batch 2 Put, 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 Scan to (n1,s1):1 +dist sender send r36: sending batch 2 Put, 1 EndTxn to (n1,s1):1 # No auto-commit inside a transaction. 
statement ok @@ -368,8 +368,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 1 Scan to (n1,s1):1 -dist sender send r35: sending batch 2 Put to (n1,s1):1 +dist sender send r36: sending batch 1 Scan to (n1,s1):1 +dist sender send r36: sending batch 2 Put to (n1,s1):1 statement ok ROLLBACK @@ -393,8 +393,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 1 Scan to (n1,s1):1 -dist sender send r35: sending batch 2 Put, 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 Scan to (n1,s1):1 +dist sender send r36: sending batch 2 Put, 1 EndTxn to (n1,s1):1 # TODO(radu): allow non-side-effecting projections. query B @@ -416,9 +416,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND operation NOT LIKE '%async%' ---- -dist sender send r35: sending batch 1 Scan to (n1,s1):1 -dist sender send r35: sending batch 2 Put to (n1,s1):1 -dist sender send r35: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 Scan to (n1,s1):1 +dist sender send r36: sending batch 2 Put to (n1,s1):1 +dist sender send r36: sending batch 1 EndTxn to (n1,s1):1 # Update with RETURNING statement with side-effects should not auto-commit. # In this case division can (in principle) error out. 
@@ -441,9 +441,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND operation NOT LIKE '%async%' ---- -dist sender send r35: sending batch 1 Scan to (n1,s1):1 -dist sender send r35: sending batch 2 Put to (n1,s1):1 -dist sender send r35: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 Scan to (n1,s1):1 +dist sender send r36: sending batch 2 Put to (n1,s1):1 +dist sender send r36: sending batch 1 EndTxn to (n1,s1):1 # Another way to test the scenario above: generate an error and ensure that the # mutation was not committed. @@ -478,7 +478,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 1 DelRng, 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 DelRng, 1 EndTxn to (n1,s1):1 # Multi-row delete should auto-commit. query B @@ -499,7 +499,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 1 DelRng, 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 DelRng, 1 EndTxn to (n1,s1):1 # No auto-commit inside a transaction. statement ok @@ -523,7 +523,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 1 DelRng to (n1,s1):1 +dist sender send r36: sending batch 1 DelRng to (n1,s1):1 statement ok ROLLBACK @@ -547,8 +547,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r35: sending batch 1 Scan to (n1,s1):1 -dist sender send r35: sending batch 2 Del, 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 Scan to (n1,s1):1 +dist sender send r36: sending batch 2 Del, 1 EndTxn to (n1,s1):1 # TODO(radu): allow non-side-effecting projections. 
query B @@ -570,9 +570,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND operation NOT LIKE '%async%' ---- -dist sender send r35: sending batch 1 Scan to (n1,s1):1 -dist sender send r35: sending batch 2 Del to (n1,s1):1 -dist sender send r35: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 Scan to (n1,s1):1 +dist sender send r36: sending batch 2 Del to (n1,s1):1 +dist sender send r36: sending batch 1 EndTxn to (n1,s1):1 # Insert with RETURNING statement with side-effects should not auto-commit. # In this case division can (in principle) error out. @@ -595,9 +595,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND operation NOT LIKE '%async%' ---- -dist sender send r35: sending batch 1 Scan to (n1,s1):1 -dist sender send r35: sending batch 2 Del to (n1,s1):1 -dist sender send r35: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 Scan to (n1,s1):1 +dist sender send r36: sending batch 2 Del to (n1,s1):1 +dist sender send r36: sending batch 1 EndTxn to (n1,s1):1 statement ok INSERT INTO ab VALUES (12, 0); @@ -644,9 +644,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND operation NOT LIKE '%async%' ---- -dist sender send r35: sending batch 2 CPut to (n1,s1):1 -dist sender send r35: sending batch 2 Scan to (n1,s1):1 -dist sender send r35: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 2 CPut to (n1,s1):1 +dist sender send r36: sending batch 2 Scan to (n1,s1):1 +dist sender send r36: sending batch 1 EndTxn to (n1,s1):1 query B SELECT count(*) > 0 FROM [ @@ -667,10 +667,10 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND operation NOT LIKE '%async%' ---- -dist sender send r35: sending batch 1 Scan to (n1,s1):1 -dist sender send r35: sending batch 1 Put to (n1,s1):1 -dist sender send r35: sending batch 1 Scan to (n1,s1):1 -dist sender 
send r35: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 Scan to (n1,s1):1 +dist sender send r36: sending batch 1 Put to (n1,s1):1 +dist sender send r36: sending batch 1 Scan to (n1,s1):1 +dist sender send r36: sending batch 1 EndTxn to (n1,s1):1 query B SELECT count(*) > 0 FROM [ @@ -692,10 +692,10 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND operation NOT LIKE '%async%' ---- -dist sender send r35: sending batch 1 Scan to (n1,s1):1 -dist sender send r35: sending batch 1 Del to (n1,s1):1 -dist sender send r35: sending batch 1 Scan to (n1,s1):1 -dist sender send r35: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 Scan to (n1,s1):1 +dist sender send r36: sending batch 1 Del to (n1,s1):1 +dist sender send r36: sending batch 1 Scan to (n1,s1):1 +dist sender send r36: sending batch 1 EndTxn to (n1,s1):1 # Test with a single cascade, which should use autocommit. statement ok @@ -719,9 +719,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND operation NOT LIKE '%async%' ---- -dist sender send r35: sending batch 1 DelRng to (n1,s1):1 -dist sender send r35: sending batch 1 Scan to (n1,s1):1 -dist sender send r35: sending batch 1 Del, 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 DelRng to (n1,s1):1 +dist sender send r36: sending batch 1 Scan to (n1,s1):1 +dist sender send r36: sending batch 1 Del, 1 EndTxn to (n1,s1):1 # ----------------------- # Multiple mutation tests @@ -749,9 +749,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND operation NOT LIKE '%async%' ---- -dist sender send r35: sending batch 2 CPut to (n1,s1):1 -dist sender send r35: sending batch 2 CPut to (n1,s1):1 -dist sender send r35: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 2 CPut to (n1,s1):1 +dist sender send r36: sending batch 2 CPut to (n1,s1):1 +dist sender send r36: sending 
batch 1 EndTxn to (n1,s1):1 query B SELECT count(*) > 0 FROM [ @@ -774,6 +774,6 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND operation NOT LIKE '%async%' ---- -dist sender send r35: sending batch 2 CPut to (n1,s1):1 -dist sender send r35: sending batch 2 CPut to (n1,s1):1 -dist sender send r35: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 2 CPut to (n1,s1):1 +dist sender send r36: sending batch 2 CPut to (n1,s1):1 +dist sender send r36: sending batch 1 EndTxn to (n1,s1):1 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/delete b/pkg/sql/opt/exec/execbuilder/testdata/delete index 37fef69ef74f..752dd4fb9228 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/delete +++ b/pkg/sql/opt/exec/execbuilder/testdata/delete @@ -231,9 +231,9 @@ SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] WHERE message LIKE '%DelRange%' OR message LIKE '%DelRng%' ---- flow DelRange /Table/57/1 - /Table/57/2 -dist sender send r35: sending batch 1 DelRng to (n1,s1):1 +dist sender send r36: sending batch 1 DelRng to (n1,s1):1 flow DelRange /Table/57/1/601/0 - /Table/57/2 -dist sender send r35: sending batch 1 DelRng to (n1,s1):1 +dist sender send r36: sending batch 1 DelRng to (n1,s1):1 # Ensure that DelRange requests are autocommitted when DELETE FROM happens on a # chunk of fewer than 600 keys. @@ -249,7 +249,7 @@ SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] WHERE message LIKE '%DelRange%' OR message LIKE '%sending batch%' ---- flow DelRange /Table/57/1/5 - /Table/57/1/5/# -dist sender send r35: sending batch 1 DelRng, 1 EndTxn to (n1,s1):1 +dist sender send r36: sending batch 1 DelRng, 1 EndTxn to (n1,s1):1 # Test use of fast path when there are interleaved tables. 
diff --git a/pkg/sql/opt/exec/execbuilder/testdata/show_trace_nonmetamorphic b/pkg/sql/opt/exec/execbuilder/testdata/show_trace_nonmetamorphic index 43b1749dbd15..ff0a2d537d9b 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/show_trace_nonmetamorphic +++ b/pkg/sql/opt/exec/execbuilder/testdata/show_trace_nonmetamorphic @@ -77,7 +77,7 @@ SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] ---- flow CPut /Table/54/1/1/0 -> /TUPLE/2:2:Int/2 flow InitPut /Table/54/2/2/0 -> /BYTES/0x89 -kv.DistSender: sending partial batch r35: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 +kv.DistSender: sending partial batch r36: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 flow fast path completed exec stmt rows affected: 1 @@ -92,7 +92,7 @@ SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] ---- flow CPut /Table/54/1/1/0 -> /TUPLE/2:2:Int/2 flow InitPut /Table/54/2/2/0 -> /BYTES/0x89 -kv.DistSender: sending partial batch r35: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 +kv.DistSender: sending partial batch r36: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 exec stmt execution failed after 0 rows: duplicate key value violates unique constraint "primary" statement error duplicate key value @@ -105,7 +105,7 @@ SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] ---- flow CPut /Table/54/1/2/0 -> /TUPLE/2:2:Int/2 flow InitPut /Table/54/2/2/0 -> /BYTES/0x8a -kv.DistSender: sending partial batch r35: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 +kv.DistSender: sending partial batch r36: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 exec stmt execution failed after 0 rows: duplicate key value violates unique constraint "woo" statement ok @@ -185,7 +185,7 @@ colbatchscan Scan /Table/54/{1-2} colbatchscan fetched: /kv/primary/1/v -> /2 flow Del /Table/54/2/2/0 flow Del /Table/54/1/1/0 -kv.DistSender: sending partial batch r35: sending batch 1 Del to (n1,s1):1 +kv.DistSender: sending partial batch r36: sending batch 1 Del to (n1,s1):1 flow fast path completed exec stmt rows 
affected: 1 @@ -242,7 +242,7 @@ SET tracing = on; INSERT INTO t.kv3 (k, v) VALUES (1,1); SET tracing = off query T SELECT message FROM [SHOW TRACE FOR SESSION] WHERE message LIKE e'%1 CPut, 1 EndTxn%' AND message NOT LIKE e'%proposing command%' ---- -r36: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 +r37: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 node received request: 1 CPut, 1 EndTxn # Temporarily disabled flaky test (#58202). diff --git a/pkg/sql/opt/exec/execbuilder/testdata/upsert_nonmetamorphic b/pkg/sql/opt/exec/execbuilder/testdata/upsert_nonmetamorphic index 25b1e81bc37e..7fed366be2d0 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/upsert_nonmetamorphic +++ b/pkg/sql/opt/exec/execbuilder/testdata/upsert_nonmetamorphic @@ -41,7 +41,7 @@ SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] colbatchscan Scan /Table/55/1/2{-/#} flow CPut /Table/55/1/2/0 -> /TUPLE/2:2:Int/3 flow InitPut /Table/55/2/3/0 -> /BYTES/0x8a -kv.DistSender: sending partial batch r35: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 +kv.DistSender: sending partial batch r36: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 flow fast path completed exec stmt rows affected: 1 @@ -55,7 +55,7 @@ SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] colbatchscan Scan /Table/55/1/1{-/#} flow CPut /Table/55/1/1/0 -> /TUPLE/2:2:Int/2 flow InitPut /Table/55/2/2/0 -> /BYTES/0x89 -kv.DistSender: sending partial batch r35: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 +kv.DistSender: sending partial batch r36: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 flow fast path completed exec stmt rows affected: 1 @@ -72,5 +72,5 @@ colbatchscan fetched: /kv/primary/2/v -> /3 flow Put /Table/55/1/2/0 -> /TUPLE/2:2:Int/2 flow Del /Table/55/2/3/0 flow CPut /Table/55/2/2/0 -> /BYTES/0x8a (expecting does not exist) -kv.DistSender: sending partial batch r35: sending batch 1 Put, 1 EndTxn to (n1,s1):1 +kv.DistSender: sending partial batch r36: sending batch 1 Put, 1 EndTxn to (n1,s1):1 exec stmt execution failed 
after 0 rows: duplicate key value violates unique constraint "woo" diff --git a/pkg/sql/pgwire/pgwire_test.go b/pkg/sql/pgwire/pgwire_test.go index 704e08914fea..447b39baef90 100644 --- a/pkg/sql/pgwire/pgwire_test.go +++ b/pkg/sql/pgwire/pgwire_test.go @@ -556,7 +556,7 @@ func TestPGPreparedQuery(t *testing.T) { baseTest.Results("users", "primary", false, 1, "username", "ASC", false, false), }}, {"SHOW TABLES FROM system", []preparedQueryTest{ - baseTest.Results("public", "comments", "table", gosql.NullString{}, gosql.NullString{}, gosql.NullString{}).Others(28), + baseTest.Results("public", "comments", "table", gosql.NullString{}, gosql.NullString{}, gosql.NullString{}).Others(29), }}, {"SHOW SCHEMAS FROM system", []preparedQueryTest{ baseTest.Results("crdb_internal", gosql.NullString{}).Others(4), diff --git a/pkg/sql/planhook.go b/pkg/sql/planhook.go index f61af5e7bba6..1ddda82ae979 100644 --- a/pkg/sql/planhook.go +++ b/pkg/sql/planhook.go @@ -13,6 +13,7 @@ package sql import ( "context" + "github.com/cockroachdb/cockroach/pkg/migration" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" @@ -97,6 +98,7 @@ type PlanHookState interface { ) (string, error) CreateSchemaNamespaceEntry(ctx context.Context, schemaNameKey roachpb.Key, schemaID descpb.ID) error + MigrationJobDeps() migration.JobDeps } // AddPlanHook adds a hook used to short-circuit creating a planNode from a diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go index ab3c9a840f67..f0993210eaaa 100644 --- a/pkg/sql/planner.go +++ b/pkg/sql/planner.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" + "github.com/cockroachdb/cockroach/pkg/migration" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/security" @@ -473,6 
+474,11 @@ func (p *planner) DistSQLPlanner() *DistSQLPlanner { return p.extendedEvalCtx.DistSQLPlanner } +// MigrationJobDeps returns the migration.JobDeps. +func (p *planner) MigrationJobDeps() migration.JobDeps { + return p.execCfg.MigrationJobDeps +} + // GetTypeFromValidSQLSyntax implements the tree.EvalPlanner interface. // We define this here to break the dependency from eval.go to the parser. func (p *planner) GetTypeFromValidSQLSyntax(sql string) (*types.T, error) { diff --git a/pkg/sql/set_cluster_setting.go b/pkg/sql/set_cluster_setting.go index 249ad5d68cfc..9022229cbfd4 100644 --- a/pkg/sql/set_cluster_setting.go +++ b/pkg/sql/set_cluster_setting.go @@ -53,7 +53,7 @@ type setClusterSettingNode struct { // versionUpgradeHook is called after validating a `SET CLUSTER SETTING // version` but before executing it. It can carry out arbitrary migrations // that allow us to eventually remove legacy code. - versionUpgradeHook func(ctx context.Context, from, to clusterversion.ClusterVersion) error + versionUpgradeHook func(ctx context.Context, username security.SQLUsername, from, to clusterversion.ClusterVersion) error } func checkPrivilegesForSetting(ctx context.Context, p *planner, name string, action string) error { @@ -259,7 +259,7 @@ func (n *setClusterSettingNode) startExec(params runParams) error { // toSettingString already validated the input, and checked to // see that we are allowed to transition. Let's call into our // upgrade hook to run migrations, if any. 
- if err := n.versionUpgradeHook(ctx, from, to); err != nil { + if err := n.versionUpgradeHook(ctx, params.p.User(), from, to); err != nil { return err } } diff --git a/pkg/sql/tests/system_table_test.go b/pkg/sql/tests/system_table_test.go index fe110ada1484..4b1c4db1abda 100644 --- a/pkg/sql/tests/system_table_test.go +++ b/pkg/sql/tests/system_table_test.go @@ -191,6 +191,7 @@ func TestSystemTableLiterals(t *testing.T) { {keys.StatementDiagnosticsTableID, systemschema.StatementDiagnosticsTableSchema, systemschema.StatementDiagnosticsTable}, {keys.ScheduledJobsTableID, systemschema.ScheduledJobsTableSchema, systemschema.ScheduledJobsTable}, {keys.SqllivenessID, systemschema.SqllivenessTableSchema, systemschema.SqllivenessTable}, + {keys.MigrationsID, systemschema.MigrationsTableSchema, systemschema.MigrationsTable}, } { privs := *test.pkg.GetPrivileges() gen, err := sql.CreateTestTableDescriptor( diff --git a/pkg/sql/tests/testdata/initial_keys b/pkg/sql/tests/testdata/initial_keys index 0a3eeea059ec..fede84a0c496 100644 --- a/pkg/sql/tests/testdata/initial_keys +++ b/pkg/sql/tests/testdata/initial_keys @@ -1,6 +1,6 @@ initial-keys tenant=system ---- -69 keys: +71 keys: /System/"desc-idgen" /Table/3/1/1/2/1 /Table/3/1/2/2/1 @@ -32,6 +32,7 @@ initial-keys tenant=system /Table/3/1/36/2/1 /Table/3/1/37/2/1 /Table/3/1/39/2/1 + /Table/3/1/40/2/1 /Table/5/1/0/2/1 /Table/5/1/1/2/1 /Table/5/1/16/2/1 @@ -47,6 +48,7 @@ initial-keys tenant=system /NamespaceTable/30/1/1/29/"jobs"/4/1 /NamespaceTable/30/1/1/29/"lease"/4/1 /NamespaceTable/30/1/1/29/"locations"/4/1 + /NamespaceTable/30/1/1/29/"migrations"/4/1 /NamespaceTable/30/1/1/29/"namespace"/4/1 /NamespaceTable/30/1/1/29/"namespace2"/4/1 /NamespaceTable/30/1/1/29/"protected_ts_meta"/4/1 @@ -70,7 +72,7 @@ initial-keys tenant=system /NamespaceTable/30/1/1/29/"users"/4/1 /NamespaceTable/30/1/1/29/"web_sessions"/4/1 /NamespaceTable/30/1/1/29/"zones"/4/1 -29 splits: +30 splits: /Table/11 /Table/12 /Table/13 @@ -100,10 +102,11 
@@ initial-keys tenant=system /Table/37 /Table/38 /Table/39 + /Table/40 initial-keys tenant=5 ---- -60 keys: +62 keys: /Tenant/5/Table/3/1/1/2/1 /Tenant/5/Table/3/1/2/2/1 /Tenant/5/Table/3/1/3/2/1 @@ -133,6 +136,7 @@ initial-keys tenant=5 /Tenant/5/Table/3/1/36/2/1 /Tenant/5/Table/3/1/37/2/1 /Tenant/5/Table/3/1/39/2/1 + /Tenant/5/Table/3/1/40/2/1 /Tenant/5/Table/7/1/0/0 /Tenant/5/NamespaceTable/30/1/0/0/"system"/4/1 /Tenant/5/NamespaceTable/30/1/1/0/"public"/4/1 @@ -143,6 +147,7 @@ initial-keys tenant=5 /Tenant/5/NamespaceTable/30/1/1/29/"jobs"/4/1 /Tenant/5/NamespaceTable/30/1/1/29/"lease"/4/1 /Tenant/5/NamespaceTable/30/1/1/29/"locations"/4/1 + /Tenant/5/NamespaceTable/30/1/1/29/"migrations"/4/1 /Tenant/5/NamespaceTable/30/1/1/29/"namespace"/4/1 /Tenant/5/NamespaceTable/30/1/1/29/"namespace2"/4/1 /Tenant/5/NamespaceTable/30/1/1/29/"protected_ts_meta"/4/1 @@ -169,7 +174,7 @@ initial-keys tenant=5 initial-keys tenant=999 ---- -60 keys: +62 keys: /Tenant/999/Table/3/1/1/2/1 /Tenant/999/Table/3/1/2/2/1 /Tenant/999/Table/3/1/3/2/1 @@ -199,6 +204,7 @@ initial-keys tenant=999 /Tenant/999/Table/3/1/36/2/1 /Tenant/999/Table/3/1/37/2/1 /Tenant/999/Table/3/1/39/2/1 + /Tenant/999/Table/3/1/40/2/1 /Tenant/999/Table/7/1/0/0 /Tenant/999/NamespaceTable/30/1/0/0/"system"/4/1 /Tenant/999/NamespaceTable/30/1/1/0/"public"/4/1 @@ -209,6 +215,7 @@ initial-keys tenant=999 /Tenant/999/NamespaceTable/30/1/1/29/"jobs"/4/1 /Tenant/999/NamespaceTable/30/1/1/29/"lease"/4/1 /Tenant/999/NamespaceTable/30/1/1/29/"locations"/4/1 + /Tenant/999/NamespaceTable/30/1/1/29/"migrations"/4/1 /Tenant/999/NamespaceTable/30/1/1/29/"namespace"/4/1 /Tenant/999/NamespaceTable/30/1/1/29/"namespace2"/4/1 /Tenant/999/NamespaceTable/30/1/1/29/"protected_ts_meta"/4/1 diff --git a/pkg/sqlmigrations/migrations.go b/pkg/sqlmigrations/migrations.go index 8868a0032439..7571b70fc00d 100644 --- a/pkg/sqlmigrations/migrations.go +++ b/pkg/sqlmigrations/migrations.go @@ -334,14 +334,14 @@ var backwardCompatibleMigrations = 
[]migrationDescriptor{ func staticIDs( ids ...descpb.ID, -) func(ctx context.Context, db db, codec keys.SQLCodec) ([]descpb.ID, error) { - return func(ctx context.Context, db db, codec keys.SQLCodec) ([]descpb.ID, error) { return ids, nil } +) func(ctx context.Context, db DB, codec keys.SQLCodec) ([]descpb.ID, error) { + return func(ctx context.Context, db DB, codec keys.SQLCodec) ([]descpb.ID, error) { return ids, nil } } func databaseIDs( names ...string, -) func(ctx context.Context, db db, codec keys.SQLCodec) ([]descpb.ID, error) { - return func(ctx context.Context, db db, codec keys.SQLCodec) ([]descpb.ID, error) { +) func(ctx context.Context, db DB, codec keys.SQLCodec) ([]descpb.ID, error) { + return func(ctx context.Context, db DB, codec keys.SQLCodec) ([]descpb.ID, error) { var ids []descpb.ID for _, name := range names { // This runs as part of an older migration (introduced in 2.1). We use @@ -392,7 +392,7 @@ type migrationDescriptor struct { // descriptors that were added by this migration. This is needed to automate // certain tests, which check the number of ranges/descriptors present on // server bootup. - newDescriptorIDs func(ctx context.Context, db db, codec keys.SQLCodec) ([]descpb.ID, error) + newDescriptorIDs func(ctx context.Context, db DB, codec keys.SQLCodec) ([]descpb.ID, error) } func init() { @@ -408,7 +408,7 @@ func init() { } type runner struct { - db db + db DB codec keys.SQLCodec sqlExecutor *sql.InternalExecutor settings *cluster.Settings @@ -449,9 +449,9 @@ type leaseManager interface { TimeRemaining(l *leasemanager.Lease) time.Duration } -// db is defined just to allow us to use a fake client.DB when testing this +// DB is defined just to allow us to use a fake client.DB when testing this // package. 
-type db interface { +type DB interface { Scan(ctx context.Context, begin, end interface{}, maxRows int64) ([]kv.KeyValue, error) Get(ctx context.Context, key interface{}) (kv.KeyValue, error) Put(ctx context.Context, key, value interface{}) error @@ -463,7 +463,7 @@ type db interface { type Manager struct { stopper *stop.Stopper leaseManager leaseManager - db db + db DB codec keys.SQLCodec sqlExecutor *sql.InternalExecutor testingKnobs MigrationManagerTestingKnobs @@ -508,7 +508,7 @@ func NewManager( // lifecycle is tightly controlled. func ExpectedDescriptorIDs( ctx context.Context, - db db, + db DB, codec keys.SQLCodec, defaultZoneConfig *zonepb.ZoneConfig, defaultSystemZoneConfig *zonepb.ZoneConfig, @@ -891,7 +891,7 @@ func (m *Manager) migrateSystemNamespace( } func getCompletedMigrations( - ctx context.Context, db db, codec keys.SQLCodec, + ctx context.Context, db DB, codec keys.SQLCodec, ) (map[string]struct{}, error) { if log.V(1) { log.Info(ctx, "trying to get the list of completed migrations") @@ -913,14 +913,26 @@ func migrationKey(codec keys.SQLCodec, migration migrationDescriptor) roachpb.Ke } func createSystemTable(ctx context.Context, r runner, desc catalog.TableDescriptor) error { + return CreateSystemTable(ctx, r.db, r.codec, r.settings, desc) +} + +// CreateSystemTable is a function to inject a new system table. If the table +// already exists, ths function is a no-op. +func CreateSystemTable( + ctx context.Context, + db DB, + codec keys.SQLCodec, + settings *cluster.Settings, + desc catalog.TableDescriptor, +) error { // We install the table at the KV layer so that we can choose a known ID in // the reserved ID space. (The SQL layer doesn't allow this.) 
- err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() - tKey := catalogkv.MakePublicTableNameKey(ctx, r.settings, desc.GetParentID(), desc.GetName()) - b.CPut(tKey.Key(r.codec), desc.GetID(), nil) - b.CPut(catalogkeys.MakeDescMetadataKey(r.codec, desc.GetID()), desc.DescriptorProto(), nil) - if err := txn.SetSystemConfigTrigger(r.codec.ForSystemTenant()); err != nil { + tKey := catalogkv.MakePublicTableNameKey(ctx, settings, desc.GetParentID(), desc.GetName()) + b.CPut(tKey.Key(codec), desc.GetID(), nil) + b.CPut(catalogkeys.MakeDescMetadataKey(codec, desc.GetID()), desc.DescriptorProto(), nil) + if err := txn.SetSystemConfigTrigger(codec.ForSystemTenant()); err != nil { return err } return txn.Run(ctx, b) @@ -1137,7 +1149,7 @@ func createDefaultDbs(ctx context.Context, r runner) error { for retry := retry.Start(retry.Options{MaxRetries: 5}); retry.Next(); { for _, dbName := range []string{catalogkeys.DefaultDatabaseName, catalogkeys.PgDatabaseName} { stmt := fmt.Sprintf(createDbStmt, dbName) - err = r.execAsRoot(ctx, "create-default-db", stmt) + err = r.execAsRoot(ctx, "create-default-DB", stmt) if err != nil { log.Warningf(ctx, "failed attempt to add database %q: %s", dbName, err) break diff --git a/pkg/ts/catalog/chart_catalog.go b/pkg/ts/catalog/chart_catalog.go index 8f805fa50039..5b8fca814e75 100644 --- a/pkg/ts/catalog/chart_catalog.go +++ b/pkg/ts/catalog/chart_catalog.go @@ -2321,6 +2321,7 @@ var charts = []sectionDescription{ "jobs.schema_change_gc.currently_running", "jobs.typedesc_schema_change.currently_running", "jobs.stream_ingestion.currently_running", + "jobs.migration.currently_running", }, }, { @@ -2454,6 +2455,17 @@ var charts = []sectionDescription{ "jobs.stream_ingestion.resume_retry_error", }, }, + { + Title: "Long Running Migrations", + Metrics: []string{ + "jobs.migration.fail_or_cancel_completed", + 
"jobs.migration.fail_or_cancel_failed", + "jobs.migration.fail_or_cancel_retry_error", + "jobs.migration.resume_completed", + "jobs.migration.resume_failed", + "jobs.migration.resume_retry_error", + }, + }, }, }, }