From bcee574c3f1623d42cc92ed03fb1464aa8205b6d Mon Sep 17 00:00:00 2001 From: Marcus Gartner Date: Fri, 14 Oct 2022 12:46:22 -0400 Subject: [PATCH 1/7] util: move FastIntSet to new pkg/util/intsets Release note: None --- pkg/BUILD.bazel | 4 ++ pkg/ccl/changefeedccl/schemafeed/BUILD.bazel | 2 +- .../schemafeed/table_event_filter.go | 10 ++--- .../tenantcostserver/BUILD.bazel | 2 +- .../tenantcostserver/server_test.go | 6 +-- pkg/cmd/roachtest/tests/BUILD.bazel | 1 + .../roachtest/tests/multitenant_distsql.go | 6 +-- pkg/col/coldata/BUILD.bazel | 1 + pkg/col/coldata/batch.go | 3 +- pkg/kv/kvclient/kvcoord/BUILD.bazel | 1 + .../kvcoord/dist_sender_mux_rangefeed.go | 4 +- .../closedts/sidetransport/BUILD.bazel | 2 +- .../kvserver/closedts/sidetransport/sender.go | 4 +- pkg/roachprod/install/BUILD.bazel | 2 +- pkg/roachprod/install/nodes.go | 4 +- pkg/roachprod/vm/local/BUILD.bazel | 2 +- pkg/roachprod/vm/local/local.go | 4 +- pkg/sql/BUILD.bazel | 2 + pkg/sql/backfill/BUILD.bazel | 2 +- pkg/sql/backfill/index_backfiller_cols.go | 6 +-- pkg/sql/catalog/BUILD.bazel | 2 + pkg/sql/catalog/bootstrap/BUILD.bazel | 1 - pkg/sql/catalog/bootstrap/kv_writer.go | 3 +- pkg/sql/catalog/descpb/BUILD.bazel | 2 +- pkg/sql/catalog/descpb/structured.go | 6 +-- pkg/sql/catalog/descriptor_id_set.go | 4 +- pkg/sql/catalog/descs/BUILD.bazel | 2 +- pkg/sql/catalog/descs/hydrate.go | 4 +- pkg/sql/catalog/internal/validate/BUILD.bazel | 2 +- .../internal/validate/schema_changer_state.go | 12 +++--- .../catalog/post_deserialization_changes.go | 4 +- pkg/sql/catalog/table_col_set.go | 4 +- pkg/sql/catalog/table_col_set_test.go | 4 +- pkg/sql/catalog/table_elements.go | 4 +- pkg/sql/catalog/tabledesc/BUILD.bazel | 1 + pkg/sql/catalog/tabledesc/structured.go | 4 +- pkg/sql/catalog/tabledesc/validate.go | 6 +-- pkg/sql/check.go | 4 +- pkg/sql/colencoding/BUILD.bazel | 2 +- pkg/sql/colencoding/key_encoding.go | 4 +- pkg/sql/colexec/colbuilder/BUILD.bazel | 2 +- pkg/sql/colexec/colbuilder/execplan.go 
| 4 +- pkg/sql/colexec/colexecspan/BUILD.bazel | 2 +- .../colexecspan/span_assembler_test.go | 4 +- pkg/sql/colfetcher/BUILD.bazel | 1 + pkg/sql/colfetcher/cfetcher.go | 8 ++-- pkg/sql/colfetcher/index_join.go | 3 +- pkg/sql/colmem/BUILD.bazel | 2 +- pkg/sql/colmem/allocator.go | 8 ++-- pkg/sql/delegate/BUILD.bazel | 2 +- pkg/sql/delegate/show_grants.go | 4 +- pkg/sql/delete_preserving_index_test.go | 4 +- pkg/sql/distinct.go | 6 +-- pkg/sql/distsql_physical_planner.go | 12 +++--- pkg/sql/drop_function.go | 4 +- pkg/sql/exec_factory_util.go | 4 +- pkg/sql/execinfra/BUILD.bazel | 2 +- pkg/sql/execinfra/processorsbase.go | 4 +- pkg/sql/execinfrapb/BUILD.bazel | 1 + pkg/sql/execinfrapb/component_stats.go | 6 +-- pkg/sql/importer/BUILD.bazel | 1 + pkg/sql/importer/read_import_pgdump.go | 6 +-- pkg/sql/insert_fast_path.go | 4 +- pkg/sql/instrumentation.go | 3 +- pkg/sql/opt/BUILD.bazel | 3 +- pkg/sql/opt/colset.go | 6 +-- pkg/sql/opt/colset_test.go | 4 +- pkg/sql/opt/constraint/BUILD.bazel | 2 +- pkg/sql/opt/constraint/constraint_test.go | 4 +- pkg/sql/opt/exec/BUILD.bazel | 2 +- pkg/sql/opt/exec/execbuilder/BUILD.bazel | 1 + pkg/sql/opt/exec/execbuilder/mutation.go | 6 +-- pkg/sql/opt/exec/explain/BUILD.bazel | 1 + pkg/sql/opt/exec/explain/plan_gist_factory.go | 5 ++- pkg/sql/opt/exec/factory.go | 8 ++-- pkg/sql/opt/indexrec/BUILD.bazel | 2 +- pkg/sql/opt/indexrec/candidate.go | 4 +- pkg/sql/opt/indexrec/hypothetical_index.go | 4 +- pkg/sql/opt/indexrec/hypothetical_table.go | 4 +- pkg/sql/opt/indexrec/rec.go | 39 ++++++++++--------- pkg/sql/opt/lookupjoin/BUILD.bazel | 2 +- pkg/sql/opt/lookupjoin/constraint_builder.go | 4 +- pkg/sql/opt/memo/BUILD.bazel | 5 +-- pkg/sql/opt/memo/expr.go | 4 +- pkg/sql/opt/memo/extract.go | 4 +- pkg/sql/opt/memo/interner_test.go | 11 +++--- pkg/sql/opt/metadata.go | 4 +- pkg/sql/opt/norm/BUILD.bazel | 1 + pkg/sql/opt/norm/factory.go | 6 +-- pkg/sql/opt/norm/inline_funcs.go | 6 +-- pkg/sql/opt/norm/project_funcs.go | 4 +- 
pkg/sql/opt/norm/prune_cols_funcs.go | 4 +- pkg/sql/opt/norm/reject_nulls_funcs.go | 8 ++-- pkg/sql/opt/optbuilder/BUILD.bazel | 1 + pkg/sql/opt/optbuilder/arbiter_set.go | 12 +++--- pkg/sql/opt/optbuilder/create_function.go | 6 +-- pkg/sql/opt/optbuilder/create_view.go | 3 +- pkg/sql/opt/optbuilder/fk_cascade.go | 4 +- pkg/sql/opt/optbuilder/insert.go | 8 ++-- pkg/sql/opt/optbuilder/join.go | 6 +-- pkg/sql/opt/optbuilder/mutation_builder.go | 14 +++---- .../optbuilder/mutation_builder_arbiter.go | 12 +++--- .../opt/optbuilder/mutation_builder_unique.go | 8 ++-- pkg/sql/opt/partialidx/BUILD.bazel | 2 +- pkg/sql/opt/partialidx/implicator.go | 4 +- pkg/sql/opt/partition/BUILD.bazel | 4 +- pkg/sql/opt/partition/locality.go | 12 +++--- pkg/sql/opt/partition/locality_test.go | 4 +- pkg/sql/opt/props/func_dep.go | 2 +- pkg/sql/opt/schema_dependencies.go | 6 +-- pkg/sql/opt/testutils/opttester/BUILD.bazel | 1 + pkg/sql/opt/testutils/opttester/opt_tester.go | 11 +++--- pkg/sql/opt/testutils/testcat/BUILD.bazel | 2 +- pkg/sql/opt/testutils/testcat/create_table.go | 4 +- pkg/sql/opt/xform/BUILD.bazel | 3 +- pkg/sql/opt/xform/coster.go | 15 ++++--- pkg/sql/opt/xform/explorer.go | 4 +- pkg/sql/opt/xform/join_funcs.go | 8 ++-- pkg/sql/opt/xform/join_order_builder.go | 4 +- pkg/sql/opt/xform/optimizer.go | 8 ++-- pkg/sql/opt/xform/scan_funcs.go | 8 ++-- pkg/sql/opt/xform/select_funcs.go | 4 +- pkg/sql/opt_exec_factory.go | 4 +- pkg/sql/physicalplan/BUILD.bazel | 2 +- pkg/sql/physicalplan/physical_plan.go | 4 +- pkg/sql/row/BUILD.bazel | 1 + pkg/sql/row/fetcher.go | 4 +- pkg/sql/row/helper.go | 6 +-- pkg/sql/row/partial_index.go | 6 +-- pkg/sql/row/row_converter.go | 3 +- pkg/sql/rowenc/BUILD.bazel | 2 +- pkg/sql/rowenc/index_encoding.go | 10 ++--- pkg/sql/rowexec/BUILD.bazel | 3 +- pkg/sql/rowexec/aggregator_test.go | 4 +- pkg/sql/rowexec/joinreader_test.go | 6 +-- pkg/sql/rowexec/mergejoiner.go | 6 +-- pkg/sql/rowexec/sample_aggregator.go | 5 ++- pkg/sql/rowexec/sampler.go | 6 
+-- pkg/sql/schemachanger/rel/BUILD.bazel | 1 + pkg/sql/schemachanger/rel/query.go | 3 +- pkg/sql/schemachanger/rel/query_data.go | 4 +- pkg/sql/schemachanger/rel/query_eval.go | 7 ++-- pkg/sql/schemachanger/rel/reltest/BUILD.bazel | 2 +- pkg/sql/schemachanger/rel/reltest/database.go | 6 +-- pkg/sql/schemachanger/scexec/BUILD.bazel | 2 +- .../scexec/backfiller/BUILD.bazel | 2 +- .../scexec/backfiller/tracker.go | 6 +-- pkg/sql/schemachanger/scexec/gc_jobs.go | 4 +- pkg/sql/sem/builtins/BUILD.bazel | 1 + pkg/sql/sem/builtins/geo_builtins.go | 4 +- pkg/sql/sem/catid/BUILD.bazel | 2 +- pkg/sql/sem/catid/index_id_set.go | 4 +- pkg/sql/sem/tree/BUILD.bazel | 1 + pkg/sql/sem/tree/constant.go | 4 +- pkg/sql/sem/tree/overload.go | 16 ++++---- pkg/sql/sem/tree/type_check.go | 18 +++++---- pkg/sql/span/BUILD.bazel | 4 +- pkg/sql/span/span_splitter.go | 6 +-- pkg/sql/span/span_splitter_test.go | 22 +++++------ pkg/sql/sqlstats/insights/BUILD.bazel | 2 +- pkg/sql/sqlstats/insights/registry.go | 4 +- pkg/sql/stats/BUILD.bazel | 4 +- pkg/sql/stats/row_sampling.go | 6 +-- pkg/sql/stats/row_sampling_test.go | 4 +- pkg/sql/stats/util.go | 4 +- pkg/sql/stmtdiagnostics/BUILD.bazel | 2 +- .../stmtdiagnostics/statement_diagnostics.go | 4 +- pkg/sql/temporary_schema.go | 4 +- pkg/sql/type_change.go | 6 +-- .../upgrades/alter_jobs_add_job_type_test.go | 4 +- pkg/util/BUILD.bazel | 5 --- pkg/util/intsets/BUILD.bazel | 28 +++++++++++++ pkg/util/{ => intsets}/fast_int_set.go | 9 ++++- pkg/util/{ => intsets}/fast_int_set_large.go | 2 +- pkg/util/{ => intsets}/fast_int_set_small.go | 2 +- pkg/util/{ => intsets}/fast_int_set_str.go | 2 +- pkg/util/{ => intsets}/fast_int_set_test.go | 2 +- .../{ => intsets}/fast_int_set_testonly.go | 2 +- pkg/util/json/BUILD.bazel | 1 + pkg/util/json/json.go | 6 +-- 180 files changed, 469 insertions(+), 403 deletions(-) create mode 100644 pkg/util/intsets/BUILD.bazel rename pkg/util/{ => intsets}/fast_int_set.go (98%) rename pkg/util/{ => 
intsets}/fast_int_set_large.go (96%) rename pkg/util/{ => intsets}/fast_int_set_small.go (96%) rename pkg/util/{ => intsets}/fast_int_set_str.go (98%) rename pkg/util/{ => intsets}/fast_int_set_test.go (99%) rename pkg/util/{ => intsets}/fast_int_set_testonly.go (99%) diff --git a/pkg/BUILD.bazel b/pkg/BUILD.bazel index 52cdce056303..1f9954dff33e 100644 --- a/pkg/BUILD.bazel +++ b/pkg/BUILD.bazel @@ -575,6 +575,7 @@ ALL_TESTS = [ "//pkg/util/humanizeutil:humanizeutil_test", "//pkg/util/interval/generic:generic_test", "//pkg/util/interval:interval_test", + "//pkg/util/intsets:intsets_test", "//pkg/util/ipaddr:ipaddr_test", "//pkg/util/json/tokenizer:tokenizer_test", "//pkg/util/json:json_disallowed_imports_test", @@ -2012,6 +2013,8 @@ GO_TARGETS = [ "//pkg/util/interval/generic:generic_test", "//pkg/util/interval:interval", "//pkg/util/interval:interval_test", + "//pkg/util/intsets:intsets", + "//pkg/util/intsets:intsets_test", "//pkg/util/ioctx:ioctx", "//pkg/util/ipaddr:ipaddr", "//pkg/util/ipaddr:ipaddr_test", @@ -3041,6 +3044,7 @@ GET_X_DATA_TARGETS = [ "//pkg/util/humanizeutil:get_x_data", "//pkg/util/interval:get_x_data", "//pkg/util/interval/generic:get_x_data", + "//pkg/util/intsets:get_x_data", "//pkg/util/ioctx:get_x_data", "//pkg/util/ipaddr:get_x_data", "//pkg/util/iterutil:get_x_data", diff --git a/pkg/ccl/changefeedccl/schemafeed/BUILD.bazel b/pkg/ccl/changefeedccl/schemafeed/BUILD.bazel index 5b36d8a7e1ae..1cc92d28ee68 100644 --- a/pkg/ccl/changefeedccl/schemafeed/BUILD.bazel +++ b/pkg/ccl/changefeedccl/schemafeed/BUILD.bazel @@ -30,10 +30,10 @@ go_library( "//pkg/sql/schemachanger/scpb", "//pkg/sql/sem/tree", "//pkg/storage", - "//pkg/util", "//pkg/util/contextutil", "//pkg/util/encoding", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/metric", "//pkg/util/syncutil", diff --git a/pkg/ccl/changefeedccl/schemafeed/table_event_filter.go b/pkg/ccl/changefeedccl/schemafeed/table_event_filter.go index 70bfc1ea52cc..72ec32ad2216 
100644 --- a/pkg/ccl/changefeedccl/schemafeed/table_event_filter.go +++ b/pkg/ccl/changefeedccl/schemafeed/table_event_filter.go @@ -16,7 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -199,7 +199,7 @@ func droppedColumnIsWatched(e TableEvent, targets changefeedbase.Targets) (bool, return true, nil } - var watchedColumnIDs util.FastIntSet + var watchedColumnIDs intsets.FastIntSet if err := e.Before.ForeachFamily(func(family *descpb.ColumnFamilyDescriptor) error { if _, ok := specifiedColumnFamiliesForTable[family.Name]; ok { for _, columnID := range family.ColumnIDs { @@ -235,11 +235,11 @@ func addedColumnIsWatched(e TableEvent, targets changefeedbase.Targets) (bool, e return false, nil } - var beforeCols util.FastIntSet + var beforeCols intsets.FastIntSet for _, col := range e.Before.VisibleColumns() { beforeCols.Add(int(col.GetID())) } - var addedCols util.FastIntSet + var addedCols intsets.FastIntSet for _, col := range e.After.VisibleColumns() { colID := int(col.GetID()) if !beforeCols.Contains(colID) { @@ -358,7 +358,7 @@ func hasNewPrimaryIndexWithNoVisibleColumnChanges( ) (cols catalog.TableColSet) { // Generate a set of watched columns if the targets contains specific columns. 
- var targetedCols util.FastIntSet + var targetedCols intsets.FastIntSet if hasSpecificColumnTargets { err := tab.ForeachFamily(func(fam *descpb.ColumnFamilyDescriptor) error { if _, ok := targetFamilies[fam.Name]; ok { diff --git a/pkg/ccl/multitenantccl/tenantcostserver/BUILD.bazel b/pkg/ccl/multitenantccl/tenantcostserver/BUILD.bazel index dc2e9f0df99e..c17ea53148b7 100644 --- a/pkg/ccl/multitenantccl/tenantcostserver/BUILD.bazel +++ b/pkg/ccl/multitenantccl/tenantcostserver/BUILD.bazel @@ -62,7 +62,7 @@ go_test( "//pkg/testutils/serverutils", "//pkg/testutils/sqlutils", "//pkg/testutils/testcluster", - "//pkg/util", + "//pkg/util/intsets", "//pkg/util/leaktest", "//pkg/util/log", "//pkg/util/metric", diff --git a/pkg/ccl/multitenantccl/tenantcostserver/server_test.go b/pkg/ccl/multitenantccl/tenantcostserver/server_test.go index 23463bfd6c4b..f539ca70d541 100644 --- a/pkg/ccl/multitenantccl/tenantcostserver/server_test.go +++ b/pkg/ccl/multitenantccl/tenantcostserver/server_test.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/testutils/metrictestutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/metric" @@ -327,7 +327,7 @@ func TestInstanceCleanup(t *testing.T) { // Note: this number needs to be at most maxInstancesCleanup. const maxInstances = 10 - var liveset, prev util.FastIntSet + var liveset, prev intsets.FastIntSet for steps := 0; steps < 100; steps++ { // Keep the previous set for debugging. 
@@ -370,7 +370,7 @@ func TestInstanceCleanup(t *testing.T) { rows := ts.r.Query(t, "SELECT instance_id FROM system.tenant_usage WHERE tenant_id = 5 AND instance_id > 0", ) - var serverSet util.FastIntSet + var serverSet intsets.FastIntSet for rows.Next() { var id int if err := rows.Scan(&id); err != nil { diff --git a/pkg/cmd/roachtest/tests/BUILD.bazel b/pkg/cmd/roachtest/tests/BUILD.bazel index 1552056f1301..c2d8e57198d3 100644 --- a/pkg/cmd/roachtest/tests/BUILD.bazel +++ b/pkg/cmd/roachtest/tests/BUILD.bazel @@ -217,6 +217,7 @@ go_library( "//pkg/util/hlc", "//pkg/util/httputil", "//pkg/util/humanizeutil", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/protoutil", "//pkg/util/randutil", diff --git a/pkg/cmd/roachtest/tests/multitenant_distsql.go b/pkg/cmd/roachtest/tests/multitenant_distsql.go index 24167bb11bb5..3b60b3f0384b 100644 --- a/pkg/cmd/roachtest/tests/multitenant_distsql.go +++ b/pkg/cmd/roachtest/tests/multitenant_distsql.go @@ -26,7 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" "github.com/cockroachdb/cockroach/pkg/roachprod/install" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/stretchr/testify/require" ) @@ -95,7 +95,7 @@ func runMultiTenantDistSQL( require.NoError(t, err) // Create numInstances sql pods and spread them evenly across the machines. 
- var nodes util.FastIntSet + var nodes intsets.FastIntSet nodes.Add(1) for i := 1; i < numInstances; i++ { node := ((i + 1) % c.Spec().NodeCount) + 1 @@ -156,7 +156,7 @@ func runMultiTenantDistSQL( continue } - var nodesInPlan util.FastIntSet + var nodesInPlan intsets.FastIntSet for res.Next() { str := "" err = res.Scan(&str) diff --git a/pkg/col/coldata/BUILD.bazel b/pkg/col/coldata/BUILD.bazel index 2e9f0707a992..29c9e3908910 100644 --- a/pkg/col/coldata/BUILD.bazel +++ b/pkg/col/coldata/BUILD.bazel @@ -24,6 +24,7 @@ go_library( "//pkg/util", "//pkg/util/buildutil", "//pkg/util/duration", + "//pkg/util/intsets", "//pkg/util/json", "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", diff --git a/pkg/col/coldata/batch.go b/pkg/col/coldata/batch.go index 5c02fdbc5da2..c146288c5e7f 100644 --- a/pkg/col/coldata/batch.go +++ b/pkg/col/coldata/batch.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -209,7 +210,7 @@ type MemBatch struct { // b is the slice of columns in this batch. b []Vec // datumVecIdxs stores the indices of all datum-backed vectors in b. - datumVecIdxs util.FastIntSet + datumVecIdxs intsets.FastIntSet useSel bool // sel is - if useSel is true - a selection vector from upstream. 
A // selection vector is a list of selected tuple indices in this memBatch's diff --git a/pkg/kv/kvclient/kvcoord/BUILD.bazel b/pkg/kv/kvclient/kvcoord/BUILD.bazel index f9a5d47fb5e5..f8391dc612b6 100644 --- a/pkg/kv/kvclient/kvcoord/BUILD.bazel +++ b/pkg/kv/kvclient/kvcoord/BUILD.bazel @@ -70,6 +70,7 @@ go_library( "//pkg/util/errorutil/unimplemented", "//pkg/util/grpcutil", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/iterutil", "//pkg/util/limit", "//pkg/util/log", diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_mux_rangefeed.go b/pkg/kv/kvclient/kvcoord/dist_sender_mux_rangefeed.go index e74f78d9b38e..dd213b5da02f 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_mux_rangefeed.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_mux_rangefeed.go @@ -15,8 +15,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/errors" ) @@ -55,7 +55,7 @@ type rangefeedMuxer struct { type muxClientState struct { client roachpb.Internal_MuxRangeFeedClient - streams util.FastIntSet + streams intsets.FastIntSet cancel context.CancelFunc } diff --git a/pkg/kv/kvserver/closedts/sidetransport/BUILD.bazel b/pkg/kv/kvserver/closedts/sidetransport/BUILD.bazel index 05f2764e4e04..8126a02008bd 100644 --- a/pkg/kv/kvserver/closedts/sidetransport/BUILD.bazel +++ b/pkg/kv/kvserver/closedts/sidetransport/BUILD.bazel @@ -20,8 +20,8 @@ go_library( "//pkg/rpc", "//pkg/rpc/nodedialer", "//pkg/settings/cluster", - "//pkg/util", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/stop", "//pkg/util/syncutil", diff --git a/pkg/kv/kvserver/closedts/sidetransport/sender.go b/pkg/kv/kvserver/closedts/sidetransport/sender.go index 3b30c582e995..b1c742d28c1e 100644 --- 
a/pkg/kv/kvserver/closedts/sidetransport/sender.go +++ b/pkg/kv/kvserver/closedts/sidetransport/sender.go @@ -26,8 +26,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/rpc/nodedialer" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/syncutil" @@ -356,7 +356,7 @@ func (s *Sender) publish(ctx context.Context) hlc.ClockTimestamp { // We'll accumulate all the nodes we need to connect to in order to check if // we need to open new connections or close existing ones. - nodesWithFollowers := util.MakeFastIntSet() + nodesWithFollowers := intsets.MakeFastIntSet() // If there's any tracked ranges for which we're not the leaseholder any more, // we need to untrack them and tell the connections about it. 
diff --git a/pkg/roachprod/install/BUILD.bazel b/pkg/roachprod/install/BUILD.bazel index 0c41730b9ac4..70a6cb005242 100644 --- a/pkg/roachprod/install/BUILD.bazel +++ b/pkg/roachprod/install/BUILD.bazel @@ -31,7 +31,7 @@ go_library( "//pkg/roachprod/ui", "//pkg/roachprod/vm/aws", "//pkg/roachprod/vm/local", - "//pkg/util", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/retry", "//pkg/util/syncutil", diff --git a/pkg/roachprod/install/nodes.go b/pkg/roachprod/install/nodes.go index f1372dd5af69..7ea3fc061fdc 100644 --- a/pkg/roachprod/install/nodes.go +++ b/pkg/roachprod/install/nodes.go @@ -15,7 +15,7 @@ import ( "strconv" "strings" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -48,7 +48,7 @@ func ListNodes(s string, numNodesInCluster int) (Nodes, error) { return allNodes(numNodesInCluster), nil } - var set util.FastIntSet + var set intsets.FastIntSet for _, p := range strings.Split(s, ",") { parts := strings.Split(p, "-") switch len(parts) { diff --git a/pkg/roachprod/vm/local/BUILD.bazel b/pkg/roachprod/vm/local/BUILD.bazel index 96dbc88836f1..588af7480f00 100644 --- a/pkg/roachprod/vm/local/BUILD.bazel +++ b/pkg/roachprod/vm/local/BUILD.bazel @@ -11,7 +11,7 @@ go_library( "//pkg/roachprod/config", "//pkg/roachprod/logger", "//pkg/roachprod/vm", - "//pkg/util", + "//pkg/util/intsets", "//pkg/util/timeutil", "@com_github_cockroachdb_errors//:errors", "@com_github_spf13_pflag//:pflag", diff --git a/pkg/roachprod/vm/local/local.go b/pkg/roachprod/vm/local/local.go index 4f883112fd1e..60046bbc6b4e 100644 --- a/pkg/roachprod/vm/local/local.go +++ b/pkg/roachprod/vm/local/local.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachprod/config" "github.com/cockroachdb/cockroach/pkg/roachprod/logger" "github.com/cockroachdb/cockroach/pkg/roachprod/vm" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" 
"github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" "github.com/spf13/pflag" @@ -158,7 +158,7 @@ func (p *Provider) Create( // We will need to assign ports to the nodes, and they must not conflict with // any other local clusters. - var portsTaken util.FastIntSet + var portsTaken intsets.FastIntSet for _, c := range p.clusters { for i := range c.VMs { portsTaken.Add(c.VMs[i].SQLPort) diff --git a/pkg/sql/BUILD.bazel b/pkg/sql/BUILD.bazel index 30b72ae9b01b..eefd7baf76df 100644 --- a/pkg/sql/BUILD.bazel +++ b/pkg/sql/BUILD.bazel @@ -488,6 +488,7 @@ go_library( "//pkg/util/hlc", "//pkg/util/humanizeutil", "//pkg/util/interval", + "//pkg/util/intsets", "//pkg/util/ioctx", "//pkg/util/iterutil", "//pkg/util/json", @@ -780,6 +781,7 @@ go_test( "//pkg/util/fsm", "//pkg/util/hlc", "//pkg/util/httputil", + "//pkg/util/intsets", "//pkg/util/json", "//pkg/util/leaktest", "//pkg/util/log", diff --git a/pkg/sql/backfill/BUILD.bazel b/pkg/sql/backfill/BUILD.bazel index f9cc1e939cc9..b9ecb074c7b0 100644 --- a/pkg/sql/backfill/BUILD.bazel +++ b/pkg/sql/backfill/BUILD.bazel @@ -31,10 +31,10 @@ go_library( "//pkg/sql/sem/tree", "//pkg/sql/sqlerrors", "//pkg/sql/types", - "//pkg/util", "//pkg/util/admission/admissionpb", "//pkg/util/ctxgroup", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/mon", "//pkg/util/syncutil", diff --git a/pkg/sql/backfill/index_backfiller_cols.go b/pkg/sql/backfill/index_backfiller_cols.go index 22a975d57f53..77b20e707cbd 100644 --- a/pkg/sql/backfill/index_backfiller_cols.go +++ b/pkg/sql/backfill/index_backfiller_cols.go @@ -13,7 +13,7 @@ package backfill import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -45,7 +45,7 @@ type indexBackfillerCols struct { // valNeededForCol contains the indexes (into 
cols) of all columns that we // need to fetch values for. - valNeededForCol util.FastIntSet + valNeededForCol intsets.FastIntSet } // makeIndexBackfillColumns computes the set of writable columns and @@ -141,7 +141,7 @@ func makeIndexBackfillColumns( // because of references in expressions. func makeInitialValNeededForCol( ib indexBackfillerCols, addedIndexes []catalog.Index, -) (valNeededForCol util.FastIntSet) { +) (valNeededForCol intsets.FastIntSet) { // Any columns we're going to eval, we don't need values for ahead of time. toEval := func() catalog.TableColSet { columnIDs := func(columns []catalog.Column) (s catalog.TableColSet) { diff --git a/pkg/sql/catalog/BUILD.bazel b/pkg/sql/catalog/BUILD.bazel index 5881e4da112b..89becab40b45 100644 --- a/pkg/sql/catalog/BUILD.bazel +++ b/pkg/sql/catalog/BUILD.bazel @@ -38,6 +38,7 @@ go_library( "//pkg/sql/types", "//pkg/util", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/iterutil", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", @@ -62,6 +63,7 @@ go_test( "//pkg/sql/catalog/schemadesc", "//pkg/sql/catalog/tabledesc", "//pkg/util", + "//pkg/util/intsets", "//pkg/util/randutil", "@com_github_cockroachdb_redact//:redact", "@com_github_stretchr_testify//require", diff --git a/pkg/sql/catalog/bootstrap/BUILD.bazel b/pkg/sql/catalog/bootstrap/BUILD.bazel index 97cb42368cd5..c37f5bd77311 100644 --- a/pkg/sql/catalog/bootstrap/BUILD.bazel +++ b/pkg/sql/catalog/bootstrap/BUILD.bazel @@ -23,7 +23,6 @@ go_library( "//pkg/sql/sem/catconstants", "//pkg/sql/sem/catid", "//pkg/sql/sem/tree", - "//pkg/util", "//pkg/util/iterutil", "//pkg/util/log", "//pkg/util/protoutil", diff --git a/pkg/sql/catalog/bootstrap/kv_writer.go b/pkg/sql/catalog/bootstrap/kv_writer.go index a04db8192943..ad8e893ab045 100644 --- a/pkg/sql/catalog/bootstrap/kv_writer.go +++ b/pkg/sql/catalog/bootstrap/kv_writer.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/rowenc" 
"github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) @@ -35,7 +34,7 @@ type KVWriter struct { codec keys.SQLCodec tableDesc catalog.TableDescriptor colIDtoRowIndex catalog.TableColMap - skippedFamilyIDs util.FastIntSet + skippedFamilyIDs intsets.FastIntSet } // RecordToKeyValues transforms a table record into the corresponding key-value diff --git a/pkg/sql/catalog/descpb/BUILD.bazel b/pkg/sql/catalog/descpb/BUILD.bazel index 6664e3657a52..d8324e776aaa 100644 --- a/pkg/sql/catalog/descpb/BUILD.bazel +++ b/pkg/sql/catalog/descpb/BUILD.bazel @@ -31,10 +31,10 @@ go_library( "//pkg/sql/sem/catid", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util", "//pkg/util/encoding", "//pkg/util/errorutil/unimplemented", "//pkg/util/hlc", + "//pkg/util/intsets", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/sql/catalog/descpb/structured.go b/pkg/sql/catalog/descpb/structured.go index 0811c9860e52..f61db366c664 100644 --- a/pkg/sql/catalog/descpb/structured.go +++ b/pkg/sql/catalog/descpb/structured.go @@ -16,7 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // ID, ColumnID, FamilyID, and IndexID are all uint32, but are each given a @@ -158,12 +158,12 @@ func (c ColumnIDs) Equals(input ColumnIDs) bool { // PermutationOf returns true if this list and the input list contain the same // set of column IDs in any order. Duplicate ColumnIDs have no effect. 
func (c ColumnIDs) PermutationOf(input ColumnIDs) bool { - ourColsSet := util.MakeFastIntSet() + ourColsSet := intsets.MakeFastIntSet() for _, col := range c { ourColsSet.Add(int(col)) } - inputColsSet := util.MakeFastIntSet() + inputColsSet := intsets.MakeFastIntSet() for _, inputCol := range input { inputColsSet.Add(int(inputCol)) } diff --git a/pkg/sql/catalog/descriptor_id_set.go b/pkg/sql/catalog/descriptor_id_set.go index 7a34ec820dbb..420a6bac7b05 100644 --- a/pkg/sql/catalog/descriptor_id_set.go +++ b/pkg/sql/catalog/descriptor_id_set.go @@ -12,12 +12,12 @@ package catalog import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // DescriptorIDSet efficiently stores an unordered set of descriptor ids. type DescriptorIDSet struct { - set util.FastIntSet + set intsets.FastIntSet } // MakeDescriptorIDSet returns a set initialized with the given values. diff --git a/pkg/sql/catalog/descs/BUILD.bazel b/pkg/sql/catalog/descs/BUILD.bazel index 3f37dbd02d61..58a3fe85e128 100644 --- a/pkg/sql/catalog/descs/BUILD.bazel +++ b/pkg/sql/catalog/descs/BUILD.bazel @@ -66,8 +66,8 @@ go_library( "//pkg/sql/sqlliveness", "//pkg/sql/sqlutil", "//pkg/sql/types", - "//pkg/util", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/iterutil", "//pkg/util/log", "//pkg/util/mon", diff --git a/pkg/sql/catalog/descs/hydrate.go b/pkg/sql/catalog/descs/hydrate.go index 1fd4e9617f4f..8151e72754f5 100644 --- a/pkg/sql/catalog/descs/hydrate.go +++ b/pkg/sql/catalog/descs/hydrate.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/tracing" ) @@ -53,7 +53,7 @@ func (tc *Collection) 
hydrateDescriptors( ctx context.Context, txn *kv.Txn, flags tree.CommonLookupFlags, descs []catalog.Descriptor, ) error { - var hydratableMutableIndexes, hydratableImmutableIndexes util.FastIntSet + var hydratableMutableIndexes, hydratableImmutableIndexes intsets.FastIntSet for i, desc := range descs { if desc == nil || !hydrateddesc.IsHydratable(desc) { continue diff --git a/pkg/sql/catalog/internal/validate/BUILD.bazel b/pkg/sql/catalog/internal/validate/BUILD.bazel index eddedc9cd273..1548ebe3c075 100644 --- a/pkg/sql/catalog/internal/validate/BUILD.bazel +++ b/pkg/sql/catalog/internal/validate/BUILD.bazel @@ -17,7 +17,7 @@ go_library( "//pkg/sql/catalog/descpb", "//pkg/sql/schemachanger/scpb", "//pkg/sql/schemachanger/screl", - "//pkg/util", + "//pkg/util/intsets", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", ], diff --git a/pkg/sql/catalog/internal/validate/schema_changer_state.go b/pkg/sql/catalog/internal/validate/schema_changer_state.go index 281b0a3f5e87..fb47e50dda06 100644 --- a/pkg/sql/catalog/internal/validate/schema_changer_state.go +++ b/pkg/sql/catalog/internal/validate/schema_changer_state.go @@ -18,7 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" ) @@ -79,7 +79,7 @@ func validateSchemaChangerState(d catalog.Descriptor, vea catalog.ValidationErro // Validate that the target ranks are unique. 
ranksToTarget := map[uint32]*scpb.Target{} { - var duplicates util.FastIntSet + var duplicates intsets.FastIntSet for i, r := range scs.TargetRanks { if _, exists := ranksToTarget[r]; exists { duplicates.Add(int(r)) @@ -102,17 +102,17 @@ func validateSchemaChangerState(d catalog.Descriptor, vea catalog.ValidationErro // Validate that the statements refer exclusively to targets in this // descriptor. - statementsExpected := map[uint32]*util.FastIntSet{} + statementsExpected := map[uint32]*intsets.FastIntSet{} for i := range scs.Targets { t := &scs.Targets[i] exp, ok := statementsExpected[t.Metadata.StatementID] if !ok { - exp = &util.FastIntSet{} + exp = &intsets.FastIntSet{} statementsExpected[t.Metadata.StatementID] = exp } exp.Add(int(scs.TargetRanks[i])) } - var statementRanks util.FastIntSet + var statementRanks intsets.FastIntSet for _, s := range scs.RelevantStatements { statementRanks.Add(int(s.StatementRank)) if _, ok := statementsExpected[s.StatementRank]; !ok { @@ -128,7 +128,7 @@ func validateSchemaChangerState(d catalog.Descriptor, vea catalog.ValidationErro // Validate that all targets have a corresponding statement. { - var expected util.FastIntSet + var expected intsets.FastIntSet stmts := statementRanks.Copy() for rank := range statementsExpected { expected.Add(int(rank)) diff --git a/pkg/sql/catalog/post_deserialization_changes.go b/pkg/sql/catalog/post_deserialization_changes.go index 2f149f299a31..3f039b127ef9 100644 --- a/pkg/sql/catalog/post_deserialization_changes.go +++ b/pkg/sql/catalog/post_deserialization_changes.go @@ -10,7 +10,7 @@ package catalog -import "github.com/cockroachdb/cockroach/pkg/util" +import "github.com/cockroachdb/cockroach/pkg/util/intsets" // PostDeserializationChangeType is used to indicate the type of // PostDeserializationChange which occurred for a descriptor. 
@@ -19,7 +19,7 @@ type PostDeserializationChangeType int // PostDeserializationChanges are a set of booleans to indicate which types of // upgrades or fixes occurred when filling in the descriptor after // deserialization. -type PostDeserializationChanges struct{ s util.FastIntSet } +type PostDeserializationChanges struct{ s intsets.FastIntSet } // HasChanges returns true if the set of changes is non-empty. func (c PostDeserializationChanges) HasChanges() bool { diff --git a/pkg/sql/catalog/table_col_set.go b/pkg/sql/catalog/table_col_set.go index 7f3d6753ef0e..8676ded4d331 100644 --- a/pkg/sql/catalog/table_col_set.go +++ b/pkg/sql/catalog/table_col_set.go @@ -12,12 +12,12 @@ package catalog import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // TableColSet efficiently stores an unordered set of column ids. type TableColSet struct { - set util.FastIntSet + set intsets.FastIntSet } // MakeTableColSet returns a set initialized with the given values. 
diff --git a/pkg/sql/catalog/table_col_set_test.go b/pkg/sql/catalog/table_col_set_test.go index 683bdbee166f..d2bf59691f40 100644 --- a/pkg/sql/catalog/table_col_set_test.go +++ b/pkg/sql/catalog/table_col_set_test.go @@ -14,7 +14,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) func BenchmarkTableColSet(b *testing.B) { @@ -23,7 +23,7 @@ func BenchmarkTableColSet(b *testing.B) { const n = 50 b.Run("fastintset", func(b *testing.B) { for i := 0; i < b.N; i++ { - var c util.FastIntSet + var c intsets.FastIntSet for j := 0; j < n; j++ { c.Add(j) } diff --git a/pkg/sql/catalog/table_elements.go b/pkg/sql/catalog/table_elements.go index c9a9152482e7..a1cb28583cad 100644 --- a/pkg/sql/catalog/table_elements.go +++ b/pkg/sql/catalog/table_elements.go @@ -19,8 +19,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/iterutil" ) @@ -930,7 +930,7 @@ func UserDefinedTypeColsInFamilyHaveSameVersion( return false, err } - familyCols := util.FastIntSet{} + familyCols := intsets.FastIntSet{} for _, colID := range family.ColumnIDs { familyCols.Add(int(colID)) } diff --git a/pkg/sql/catalog/tabledesc/BUILD.bazel b/pkg/sql/catalog/tabledesc/BUILD.bazel index cf7fdd65937c..0bd4bd519fec 100644 --- a/pkg/sql/catalog/tabledesc/BUILD.bazel +++ b/pkg/sql/catalog/tabledesc/BUILD.bazel @@ -54,6 +54,7 @@ go_library( "//pkg/util/errorutil/unimplemented", "//pkg/util/hlc", "//pkg/util/interval", + "//pkg/util/intsets", "//pkg/util/iterutil", "//pkg/util/protoutil", "//pkg/util/timeutil", diff --git a/pkg/sql/catalog/tabledesc/structured.go 
b/pkg/sql/catalog/tabledesc/structured.go index 809b59d32823..621f40426202 100644 --- a/pkg/sql/catalog/tabledesc/structured.go +++ b/pkg/sql/catalog/tabledesc/structured.go @@ -36,9 +36,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/iterutil" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" @@ -2042,7 +2042,7 @@ func (desc *wrapper) MakeFirstMutationPublic( } type mutationPublicationPolicy struct { - policy util.FastIntSet + policy intsets.FastIntSet } func makeMutationPublicationPolicy( diff --git a/pkg/sql/catalog/tabledesc/validate.go b/pkg/sql/catalog/tabledesc/validate.go index d4e71e351f15..5f2bc9f874cd 100644 --- a/pkg/sql/catalog/tabledesc/validate.go +++ b/pkg/sql/catalog/tabledesc/validate.go @@ -29,9 +29,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" "github.com/cockroachdb/cockroach/pkg/util/interval" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" "github.com/lib/pq/oid" ) @@ -658,7 +658,7 @@ func (desc *wrapper) ValidateSelf(vea catalog.ValidationErrorAccumulator) { // Validate mutations and exit early if any of these are deeply corrupted. 
{ - var mutationIDs util.FastIntSet + var mutationIDs intsets.FastIntSet mutationsHaveErrs := false for _, m := range desc.Mutations { mutationIDs.Add(int(m.MutationID)) @@ -1161,7 +1161,7 @@ func (desc *wrapper) validateUniqueWithoutIndexConstraints( } // Verify that the constraint's column IDs are valid and unique. - var seen util.FastIntSet + var seen intsets.FastIntSet for i, n := 0, c.NumKeyColumns(); i < n; i++ { colID := c.GetKeyColumnID(i) _, ok := columnsByID[colID] diff --git a/pkg/sql/check.go b/pkg/sql/check.go index a957b5242ca6..c3bb6dbfedfe 100644 --- a/pkg/sql/check.go +++ b/pkg/sql/check.go @@ -31,7 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/retry" "github.com/cockroachdb/errors" @@ -819,7 +819,7 @@ func formatValues(colNames []string, values tree.Datums) string { // It is allowed to check only a subset of the active checks (the optimizer // could in principle determine that some checks can't fail because they // statically evaluate to true for the entire input). -type checkSet = util.FastIntSet +type checkSet = intsets.FastIntSet // When executing mutations, we calculate a boolean column for each check // indicating if the check passed. 
This function verifies that each result is diff --git a/pkg/sql/colencoding/BUILD.bazel b/pkg/sql/colencoding/BUILD.bazel index 1f056b56edce..a7da88bdf472 100644 --- a/pkg/sql/colencoding/BUILD.bazel +++ b/pkg/sql/colencoding/BUILD.bazel @@ -18,10 +18,10 @@ go_library( "//pkg/sql/rowenc/valueside", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util", "//pkg/util/buildutil", "//pkg/util/duration", "//pkg/util/encoding", + "//pkg/util/intsets", "//pkg/util/uuid", "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", diff --git a/pkg/sql/colencoding/key_encoding.go b/pkg/sql/colencoding/key_encoding.go index 859f9ee53636..212b3437fb33 100644 --- a/pkg/sql/colencoding/key_encoding.go +++ b/pkg/sql/colencoding/key_encoding.go @@ -22,10 +22,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/buildutil" "github.com/cockroachdb/cockroach/pkg/util/duration" "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" ) @@ -52,7 +52,7 @@ func DecodeKeyValsToCols( indexColIdx []int, checkAllColsForNull bool, keyCols []descpb.IndexFetchSpec_KeyColumn, - unseen *util.FastIntSet, + unseen *intsets.FastIntSet, key []byte, scratch []byte, ) (remainingKey []byte, foundNull bool, retScratch []byte, _ error) { diff --git a/pkg/sql/colexec/colbuilder/BUILD.bazel b/pkg/sql/colexec/colbuilder/BUILD.bazel index c68247dd4ec0..ac9d725d245c 100644 --- a/pkg/sql/colexec/colbuilder/BUILD.bazel +++ b/pkg/sql/colexec/colbuilder/BUILD.bazel @@ -37,8 +37,8 @@ go_library( "//pkg/sql/sem/tree/treecmp", "//pkg/sql/sessiondatapb", "//pkg/sql/types", - "//pkg/util", "//pkg/util/buildutil", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/mon", 
"@com_github_cockroachdb_errors//:errors", diff --git a/pkg/sql/colexec/colbuilder/execplan.go b/pkg/sql/colexec/colbuilder/execplan.go index 666e84161fe8..1791dc33e2ae 100644 --- a/pkg/sql/colexec/colbuilder/execplan.go +++ b/pkg/sql/colexec/colbuilder/execplan.go @@ -45,8 +45,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree/treecmp" "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/buildutil" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/errors" @@ -130,7 +130,7 @@ type opResult struct { } func needHashAggregator(aggSpec *execinfrapb.AggregatorSpec) (bool, error) { - var groupCols, orderedCols util.FastIntSet + var groupCols, orderedCols intsets.FastIntSet for _, col := range aggSpec.OrderedGroupCols { orderedCols.Add(int(col)) } diff --git a/pkg/sql/colexec/colexecspan/BUILD.bazel b/pkg/sql/colexec/colexecspan/BUILD.bazel index 0db353faa59d..3e0e93de0dbd 100644 --- a/pkg/sql/colexec/colexecspan/BUILD.bazel +++ b/pkg/sql/colexec/colexecspan/BUILD.bazel @@ -67,7 +67,7 @@ go_test( "//pkg/sql/span", "//pkg/sql/types", "//pkg/testutils/skip", - "//pkg/util", + "//pkg/util/intsets", "//pkg/util/leaktest", "//pkg/util/log", "//pkg/util/randutil", diff --git a/pkg/sql/colexec/colexecspan/span_assembler_test.go b/pkg/sql/colexec/colexecspan/span_assembler_test.go index 1cff4166543d..ce266b182da2 100644 --- a/pkg/sql/colexec/colexecspan/span_assembler_test.go +++ b/pkg/sql/colexec/colexecspan/span_assembler_test.go @@ -36,7 +36,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/span" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" 
"github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/randutil" @@ -91,7 +91,7 @@ func TestSpanAssembler(t *testing.T) { if err != nil { t.Fatal(err) } - neededColumns := util.MakeFastIntSet(1, 2, 3, 4) + neededColumns := intsets.MakeFastIntSet(1, 2, 3, 4) cols := make([]coldata.Vec, len(typs)) for i, typ := range typs { diff --git a/pkg/sql/colfetcher/BUILD.bazel b/pkg/sql/colfetcher/BUILD.bazel index 985b13f7c6d5..dad013fb993f 100644 --- a/pkg/sql/colfetcher/BUILD.bazel +++ b/pkg/sql/colfetcher/BUILD.bazel @@ -50,6 +50,7 @@ go_library( "//pkg/util", "//pkg/util/encoding", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/mon", "//pkg/util/syncutil", diff --git a/pkg/sql/colfetcher/cfetcher.go b/pkg/sql/colfetcher/cfetcher.go index 92899fba297a..d2337a9c73a8 100644 --- a/pkg/sql/colfetcher/cfetcher.go +++ b/pkg/sql/colfetcher/cfetcher.go @@ -37,9 +37,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" "github.com/lib/pq/oid" @@ -52,7 +52,7 @@ type cTableInfo struct { // The set of required value-component column ordinals among only needed // columns. - neededValueColsByIdx util.FastIntSet + neededValueColsByIdx intsets.FastIntSet // Map used to get the column index based on the descpb.ColumnID. // It's kept as a pointer so we don't have to re-allocate to sort it each @@ -66,7 +66,7 @@ type cTableInfo struct { // The set of column ordinals which are both composite and part of the index // key. 
- compositeIndexColOrdinals util.FastIntSet + compositeIndexColOrdinals intsets.FastIntSet // One number per column coming from the "key suffix" that is part of the // value; each number is a column ordinal among only needed columns; -1 if @@ -246,7 +246,7 @@ type cFetcher struct { // remainingValueColsByIdx is the set of value columns that are yet to be // seen during the decoding of the current row. - remainingValueColsByIdx util.FastIntSet + remainingValueColsByIdx intsets.FastIntSet // lastRowPrefix is the row prefix for the last row we saw a key for. New // keys are compared against this prefix to determine whether they're part // of a new row or not. diff --git a/pkg/sql/colfetcher/index_join.go b/pkg/sql/colfetcher/index_join.go index 7a40a153b1b9..73e66a911075 100644 --- a/pkg/sql/colfetcher/index_join.go +++ b/pkg/sql/colfetcher/index_join.go @@ -38,6 +38,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/tracing" @@ -94,7 +95,7 @@ type ColIndexJoin struct { // Fields that deal with variable-size types. 
hasVarSizeCols bool - varSizeVecIdxs util.FastIntSet + varSizeVecIdxs intsets.FastIntSet byteLikeCols []*coldata.Bytes decimalCols []coldata.Decimals datumCols []coldata.DatumVec diff --git a/pkg/sql/colmem/BUILD.bazel b/pkg/sql/colmem/BUILD.bazel index 9ae56af67b42..7e5734d9f411 100644 --- a/pkg/sql/colmem/BUILD.bazel +++ b/pkg/sql/colmem/BUILD.bazel @@ -13,8 +13,8 @@ go_library( "//pkg/sql/memsize", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util", "//pkg/util/buildutil", + "//pkg/util/intsets", "//pkg/util/mon", "@com_github_cockroachdb_errors//:errors", ], diff --git a/pkg/sql/colmem/allocator.go b/pkg/sql/colmem/allocator.go index 265335d696d3..1ddb3d8c5b2b 100644 --- a/pkg/sql/colmem/allocator.go +++ b/pkg/sql/colmem/allocator.go @@ -20,8 +20,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/memsize" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/buildutil" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/errors" ) @@ -821,7 +821,7 @@ type SetAccountingHelper struct { allFixedLength bool // bytesLikeVecIdxs stores the indices of all bytes-like vectors. - bytesLikeVecIdxs util.FastIntSet + bytesLikeVecIdxs intsets.FastIntSet // bytesLikeVectors stores all actual bytes-like vectors. It is updated // every time a new batch is allocated. bytesLikeVectors []*coldata.Bytes @@ -830,7 +830,7 @@ type SetAccountingHelper struct { prevBytesLikeTotalSize int64 // decimalVecIdxs stores the indices of all decimal vectors. - decimalVecIdxs util.FastIntSet + decimalVecIdxs intsets.FastIntSet // decimalVecs stores all decimal vectors. They are updated every time a new // batch is allocated. 
decimalVecs []coldata.Decimals @@ -849,7 +849,7 @@ type SetAccountingHelper struct { // varLenDatumVecIdxs stores the indices of all datum-backed vectors with // variable-length values. - varLenDatumVecIdxs util.FastIntSet + varLenDatumVecIdxs intsets.FastIntSet // varLenDatumVecs stores all variable-sized datum-backed vectors. They are // updated every time a new batch is allocated. varLenDatumVecs []coldata.DatumVec diff --git a/pkg/sql/delegate/BUILD.bazel b/pkg/sql/delegate/BUILD.bazel index 5015cf08fc09..f80acb268d2a 100644 --- a/pkg/sql/delegate/BUILD.bazel +++ b/pkg/sql/delegate/BUILD.bazel @@ -60,8 +60,8 @@ go_library( "//pkg/sql/sem/tree", "//pkg/sql/sqltelemetry", "//pkg/sql/syntheticprivilege", - "//pkg/util", "//pkg/util/errorutil/unimplemented", + "//pkg/util/intsets", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/sql/delegate/show_grants.go b/pkg/sql/delegate/show_grants.go index 1ff063d63081..8ed433e003da 100644 --- a/pkg/sql/delegate/show_grants.go +++ b/pkg/sql/delegate/show_grants.go @@ -24,7 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // delegateShowGrants implements SHOW GRANTS which returns grant details for the @@ -251,7 +251,7 @@ SELECT database_name, } else if n.Targets != nil && len(n.Targets.Functions) > 0 { fmt.Fprint(&source, udfQuery) orderBy = "1,2,3,4,5,6" - fnResolved := util.MakeFastIntSet() + fnResolved := intsets.MakeFastIntSet() for _, fn := range n.Targets.Functions { un := fn.FuncName.ToUnresolvedObjectName().ToUnresolvedName() fd, err := d.catalog.ResolveFunction(d.ctx, un, &d.evalCtx.SessionData().SearchPath) diff --git a/pkg/sql/delete_preserving_index_test.go b/pkg/sql/delete_preserving_index_test.go index 618e2a89a32e..6727a130395c 100644 --- 
a/pkg/sql/delete_preserving_index_test.go +++ b/pkg/sql/delete_preserving_index_test.go @@ -43,7 +43,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" @@ -740,7 +740,7 @@ func fetchIndex( idx, err := table.FindIndexWithName(indexName) require.NoError(t, err) colIdxMap := catalog.ColumnIDToOrdinalMap(table.PublicColumns()) - var valsNeeded util.FastIntSet + var valsNeeded intsets.FastIntSet { colIDsNeeded := idx.CollectKeyColumnIDs() if idx.Primary() { diff --git a/pkg/sql/distinct.go b/pkg/sql/distinct.go index d886c0b0bfdb..b2695fd45084 100644 --- a/pkg/sql/distinct.go +++ b/pkg/sql/distinct.go @@ -14,7 +14,7 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // distinctNode de-duplicates rows returned by a wrapped planNode. @@ -28,11 +28,11 @@ type distinctNode struct { // Otherwise, distinctOnColIdxs is a strict subset of the child // planNode's column indices indicating which columns are specified in // the DISTINCT ON () clause. - distinctOnColIdxs util.FastIntSet + distinctOnColIdxs intsets.FastIntSet // Subset of distinctOnColIdxs on which the input guarantees an ordering. // All rows that are equal on these columns appear contiguously in the input. 
- columnsInOrder util.FastIntSet + columnsInOrder intsets.FastIntSet reqOrdering ReqOrdering diff --git a/pkg/sql/distsql_physical_planner.go b/pkg/sql/distsql_physical_planner.go index f53dab7e950f..ce3c57c9b0d8 100644 --- a/pkg/sql/distsql_physical_planner.go +++ b/pkg/sql/distsql_physical_planner.go @@ -49,10 +49,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/span" "github.com/cockroachdb/cockroach/pkg/sql/sqlinstance" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/buildutil" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/quotapool" "github.com/cockroachdb/cockroach/pkg/util/randutil" @@ -1937,7 +1937,7 @@ func (dsp *DistSQLPlanner) planAggregators( groupCols[i] = uint32(p.PlanToStreamColMap[idx]) } orderedGroupCols := make([]uint32, len(info.groupColOrdering)) - var orderedGroupColSet util.FastIntSet + var orderedGroupColSet intsets.FastIntSet for i, c := range info.groupColOrdering { orderedGroupCols[i] = uint32(p.PlanToStreamColMap[c.ColIdx]) orderedGroupColSet.Add(c.ColIdx) @@ -2002,7 +2002,7 @@ func (dsp *DistSQLPlanner) planAggregators( // left and right inputs of the join, respectively, then columns // 0, 1, ..., m-1 refer to the corresponding "left" columns whereas // m, m+1, ..., m+n-1 refer to the "right" ones. 
- var joinEqCols util.FastIntSet + var joinEqCols intsets.FastIntSet m := len(prevStageProc.Input[0].ColumnTypes) for _, leftEqCol := range hjSpec.LeftEqColumns { joinEqCols.Add(int(leftEqCol)) @@ -2033,7 +2033,7 @@ func (dsp *DistSQLPlanner) planAggregators( } } if allDistinct { - var distinctColumnsSet util.FastIntSet + var distinctColumnsSet intsets.FastIntSet for _, e := range info.aggregations { for _, colIdx := range e.ColIdx { distinctColumnsSet.Add(int(colIdx)) @@ -2588,7 +2588,7 @@ func (dsp *DistSQLPlanner) createPlanForIndexJoin( } fetchColIDs := make([]descpb.ColumnID, len(n.cols)) - var fetchOrdinals util.FastIntSet + var fetchOrdinals intsets.FastIntSet for i := range n.cols { fetchColIDs[i] = n.cols[i].GetID() fetchOrdinals.Add(n.cols[i].Ordinal()) @@ -2671,7 +2671,7 @@ func (dsp *DistSQLPlanner) createPlanForLookupJoin( } fetchColIDs := make([]descpb.ColumnID, len(n.table.cols)) - var fetchOrdinals util.FastIntSet + var fetchOrdinals intsets.FastIntSet for i := range n.table.cols { fetchColIDs[i] = n.table.cols[i].GetID() fetchOrdinals.Add(n.table.cols[i].Ordinal()) diff --git a/pkg/sql/drop_function.go b/pkg/sql/drop_function.go index 2c62358312aa..71e971b0533b 100644 --- a/pkg/sql/drop_function.go +++ b/pkg/sql/drop_function.go @@ -20,8 +20,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scerrors" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" "github.com/cockroachdb/errors" ) @@ -51,7 +51,7 @@ func (p *planner) DropFunction( toDrop: make([]*funcdesc.Mutable, 0, len(n.Functions)), dropBehavior: n.DropBehavior, } - fnResolved := util.MakeFastIntSet() + fnResolved := intsets.MakeFastIntSet() for _, fn := range n.Functions { ol, err := 
p.matchUDF(ctx, &fn, !n.IfExists) if err != nil { diff --git a/pkg/sql/exec_factory_util.go b/pkg/sql/exec_factory_util.go index d9201c4b4db0..7ac23b4f67a0 100644 --- a/pkg/sql/exec_factory_util.go +++ b/pkg/sql/exec_factory_util.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/rowexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -309,7 +309,7 @@ func constructOpaque(metadata opt.OpaqueMetadata) (planNode, error) { return o.plan, nil } -func convertFastIntSetToUint32Slice(colIdxs util.FastIntSet) []uint32 { +func convertFastIntSetToUint32Slice(colIdxs intsets.FastIntSet) []uint32 { cols := make([]uint32, 0, colIdxs.Len()) for i, ok := colIdxs.Next(0); ok; i, ok = colIdxs.Next(i + 1) { cols = append(cols, uint32(i)) diff --git a/pkg/sql/execinfra/BUILD.bazel b/pkg/sql/execinfra/BUILD.bazel index 0a2bcd23a134..a0adebd3917c 100644 --- a/pkg/sql/execinfra/BUILD.bazel +++ b/pkg/sql/execinfra/BUILD.bazel @@ -58,9 +58,9 @@ go_library( "//pkg/sql/sqlutil", "//pkg/sql/types", "//pkg/storage/fs", - "//pkg/util", "//pkg/util/admission", "//pkg/util/buildutil", + "//pkg/util/intsets", "//pkg/util/limit", "//pkg/util/log", "//pkg/util/log/logcrash", diff --git a/pkg/sql/execinfra/processorsbase.go b/pkg/sql/execinfra/processorsbase.go index 0f179a064521..f2bb7d95bea3 100644 --- a/pkg/sql/execinfra/processorsbase.go +++ b/pkg/sql/execinfra/processorsbase.go @@ -21,8 +21,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/buildutil" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" 
"github.com/cockroachdb/cockroach/pkg/util/log/logcrash" "github.com/cockroachdb/cockroach/pkg/util/mon" @@ -982,7 +982,7 @@ type LocalProcessor interface { // HasParallelProcessors returns whether flow contains multiple processors in // the same stage. func HasParallelProcessors(flow *execinfrapb.FlowSpec) bool { - var seen util.FastIntSet + var seen intsets.FastIntSet for _, p := range flow.Processors { if seen.Contains(int(p.StageID)) { return true diff --git a/pkg/sql/execinfrapb/BUILD.bazel b/pkg/sql/execinfrapb/BUILD.bazel index 4a7457ea8a3a..e58fe8c4bfd0 100644 --- a/pkg/sql/execinfrapb/BUILD.bazel +++ b/pkg/sql/execinfrapb/BUILD.bazel @@ -48,6 +48,7 @@ go_library( "//pkg/util/encoding", "//pkg/util/hlc", "//pkg/util/humanizeutil", + "//pkg/util/intsets", "//pkg/util/netutil", "//pkg/util/optional", "//pkg/util/protoutil", diff --git a/pkg/sql/execinfrapb/component_stats.go b/pkg/sql/execinfrapb/component_stats.go index 2ee92cb6049f..7e25c7f88c20 100644 --- a/pkg/sql/execinfrapb/component_stats.go +++ b/pkg/sql/execinfrapb/component_stats.go @@ -15,8 +15,8 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/humanizeutil" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/optional" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb" @@ -417,8 +417,8 @@ func ExtractStatsFromSpans( // ExtractNodesFromSpans extracts a list of node ids from a set of tracing // spans. -func ExtractNodesFromSpans(spans []tracingpb.RecordedSpan) util.FastIntSet { - var nodes util.FastIntSet +func ExtractNodesFromSpans(spans []tracingpb.RecordedSpan) intsets.FastIntSet { + var nodes intsets.FastIntSet // componentStats is only used to check whether a structured payload item is // of ComponentStats type. 
var componentStats ComponentStats diff --git a/pkg/sql/importer/BUILD.bazel b/pkg/sql/importer/BUILD.bazel index c13825a525dd..405511c699f5 100644 --- a/pkg/sql/importer/BUILD.bazel +++ b/pkg/sql/importer/BUILD.bazel @@ -100,6 +100,7 @@ go_library( "//pkg/util/errorutil/unimplemented", "//pkg/util/hlc", "//pkg/util/humanizeutil", + "//pkg/util/intsets", "//pkg/util/ioctx", "//pkg/util/log", "//pkg/util/log/eventpb", diff --git a/pkg/sql/importer/read_import_pgdump.go b/pkg/sql/importer/read_import_pgdump.go index c30d075bca1e..f0c69eba163a 100644 --- a/pkg/sql/importer/read_import_pgdump.go +++ b/pkg/sql/importer/read_import_pgdump.go @@ -41,11 +41,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/humanizeutil" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" "github.com/lib/pq/oid" ) @@ -1127,7 +1127,7 @@ func (m *pgDumpReader) readFile( var targetColMapIdx []int if len(i.Columns) != 0 { targetColMapIdx = make([]int, len(i.Columns)) - conv.TargetColOrds = util.FastIntSet{} + conv.TargetColOrds = intsets.FastIntSet{} for j := range i.Columns { colName := string(i.Columns[j]) idx, ok := m.colMap[conv][colName] @@ -1194,7 +1194,7 @@ func (m *pgDumpReader) readFile( var targetColMapIdx []int if conv != nil { targetColMapIdx = make([]int, len(i.Columns)) - conv.TargetColOrds = util.FastIntSet{} + conv.TargetColOrds = intsets.FastIntSet{} for j := range i.Columns { colName := string(i.Columns[j]) idx, ok := m.colMap[conv][colName] diff --git a/pkg/sql/insert_fast_path.go b/pkg/sql/insert_fast_path.go index 91c675ea5bf9..9bce529323ac 100644 --- 
a/pkg/sql/insert_fast_path.go +++ b/pkg/sql/insert_fast_path.go @@ -26,7 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/span" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) @@ -106,7 +106,7 @@ func (c *insertFastPathFKCheck) init(params runParams) error { codec := params.ExecCfg().Codec c.keyPrefix = rowenc.MakeIndexKeyPrefix(codec, c.tabDesc.GetID(), c.idx.GetID()) c.spanBuilder.Init(params.EvalContext(), codec, c.tabDesc, c.idx) - c.spanSplitter = span.MakeSplitter(c.tabDesc, c.idx, util.FastIntSet{} /* neededColOrdinals */) + c.spanSplitter = span.MakeSplitter(c.tabDesc, c.idx, intsets.FastIntSet{} /* neededColOrdinals */) if len(c.InsertCols) > idx.numLaxKeyCols { return errors.AssertionFailedf( diff --git a/pkg/sql/instrumentation.go b/pkg/sql/instrumentation.go index 25f599e414f1..cc1efa856ab7 100644 --- a/pkg/sql/instrumentation.go +++ b/pkg/sql/instrumentation.go @@ -40,6 +40,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/stmtdiagnostics" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/buildutil" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb" @@ -667,7 +668,7 @@ func (m execNodeTraceMetadata) annotateExplain( var nodeStats exec.ExecutionStats incomplete := false - var nodes util.FastIntSet + var nodes intsets.FastIntSet regionsMap := make(map[string]struct{}) for _, c := range components { if c.Type == execinfrapb.ComponentID_PROCESSOR { diff --git a/pkg/sql/opt/BUILD.bazel b/pkg/sql/opt/BUILD.bazel index 1e417a36c73c..828bb67fd8bd 100644 --- a/pkg/sql/opt/BUILD.bazel +++ b/pkg/sql/opt/BUILD.bazel 
@@ -42,6 +42,7 @@ go_library( "//pkg/sql/types", "//pkg/util", "//pkg/util/buildutil", + "//pkg/util/intsets", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", "@com_github_lib_pq//oid", @@ -70,7 +71,7 @@ go_test( "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util", + "//pkg/util/intsets", "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/sql/opt/colset.go b/pkg/sql/opt/colset.go index f6b6cc107b0b..cd8119e94e0b 100644 --- a/pkg/sql/opt/colset.go +++ b/pkg/sql/opt/colset.go @@ -11,14 +11,14 @@ package opt import ( - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/buildutil" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) // ColSet efficiently stores an unordered set of column ids. type ColSet struct { - set util.FastIntSet + set intsets.FastIntSet } // We offset the ColumnIDs in the underlying FastIntSet by 1, so that the @@ -116,7 +116,7 @@ func (s ColSet) SubsetOf(rhs ColSet) bool { return s.set.SubsetOf(rhs.set) } // numbers are shown as ranges. For example, for the set {1, 2, 3 5, 6, 10}, // the output is "(1-3,5,6,10)". 
func (s ColSet) String() string { - var noOffset util.FastIntSet + var noOffset intsets.FastIntSet s.ForEach(func(col ColumnID) { noOffset.Add(int(col)) }) diff --git a/pkg/sql/opt/colset_test.go b/pkg/sql/opt/colset_test.go index 704351a92dfa..355668f648d6 100644 --- a/pkg/sql/opt/colset_test.go +++ b/pkg/sql/opt/colset_test.go @@ -13,7 +13,7 @@ package opt import ( "testing" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) func BenchmarkColSet(b *testing.B) { @@ -22,7 +22,7 @@ func BenchmarkColSet(b *testing.B) { const n = 50 b.Run("fastintset", func(b *testing.B) { for i := 0; i < b.N; i++ { - var c util.FastIntSet + var c intsets.FastIntSet for j := 1; j <= n; j++ { c.Add(j) } diff --git a/pkg/sql/opt/constraint/BUILD.bazel b/pkg/sql/opt/constraint/BUILD.bazel index e2db1c6e098c..f938b177ea3d 100644 --- a/pkg/sql/opt/constraint/BUILD.bazel +++ b/pkg/sql/opt/constraint/BUILD.bazel @@ -48,8 +48,8 @@ go_test( "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util", "//pkg/util/encoding", + "//pkg/util/intsets", "//pkg/util/leaktest", "//pkg/util/randutil", ], diff --git a/pkg/sql/opt/constraint/constraint_test.go b/pkg/sql/opt/constraint/constraint_test.go index b0f34742442c..6188bec2a1e3 100644 --- a/pkg/sql/opt/constraint/constraint_test.go +++ b/pkg/sql/opt/constraint/constraint_test.go @@ -20,7 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/testutils/testcat" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/leaktest" ) @@ -596,7 +596,7 @@ func TestConsolidateLocalAndRemoteSpans(t *testing.T) { // only has the partitions and ps (PrefixSorter) elements populated. 
partitionSpans := parseSpans(&evalCtx, tc.partitionSpans) partitions := make([]testcat.Partition, partitionSpans.Count()) - localPartitions := util.FastIntSet{} + localPartitions := intsets.FastIntSet{} for j := 0; j < partitionSpans.Count(); j++ { span := partitionSpans.Get(j) spanDatums := make([]tree.Datums, 1) diff --git a/pkg/sql/opt/exec/BUILD.bazel b/pkg/sql/opt/exec/BUILD.bazel index 5ab669d46731..16583d0a8c3e 100644 --- a/pkg/sql/opt/exec/BUILD.bazel +++ b/pkg/sql/opt/exec/BUILD.bazel @@ -20,7 +20,7 @@ go_library( "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util", + "//pkg/util/intsets", "//pkg/util/optional", ], ) diff --git a/pkg/sql/opt/exec/execbuilder/BUILD.bazel b/pkg/sql/opt/exec/execbuilder/BUILD.bazel index 4138ad9b41fd..2bd5baea28a3 100644 --- a/pkg/sql/opt/exec/execbuilder/BUILD.bazel +++ b/pkg/sql/opt/exec/execbuilder/BUILD.bazel @@ -48,6 +48,7 @@ go_library( "//pkg/util/encoding", "//pkg/util/errorutil", "//pkg/util/errorutil/unimplemented", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/timeutil", "//pkg/util/treeprinter", diff --git a/pkg/sql/opt/exec/execbuilder/mutation.go b/pkg/sql/opt/exec/execbuilder/mutation.go index f24df385ba7c..6c3d99429d15 100644 --- a/pkg/sql/opt/exec/execbuilder/mutation.go +++ b/pkg/sql/opt/exec/execbuilder/mutation.go @@ -25,7 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -652,8 +652,8 @@ func appendColsWhenPresent(dst opt.ColList, src opt.OptionalColList) opt.ColList // column ID in the given list. This is used with mutation operators, which // maintain lists that correspond to the target table, with zero column IDs // indicating columns that are not involved in the mutation. 
-func ordinalSetFromColList(colList opt.OptionalColList) util.FastIntSet { - var res util.FastIntSet +func ordinalSetFromColList(colList opt.OptionalColList) intsets.FastIntSet { + var res intsets.FastIntSet for i, col := range colList { if col != 0 { res.Add(i) diff --git a/pkg/sql/opt/exec/explain/BUILD.bazel b/pkg/sql/opt/exec/explain/BUILD.bazel index dc1fcbe02920..915b0451538b 100644 --- a/pkg/sql/opt/exec/explain/BUILD.bazel +++ b/pkg/sql/opt/exec/explain/BUILD.bazel @@ -35,6 +35,7 @@ go_library( "//pkg/util", "//pkg/util/errorutil", "//pkg/util/humanizeutil", + "//pkg/util/intsets", "//pkg/util/timeutil", "//pkg/util/treeprinter", "@com_github_cockroachdb_errors//:errors", diff --git a/pkg/sql/opt/exec/explain/plan_gist_factory.go b/pkg/sql/opt/exec/explain/plan_gist_factory.go index 9ba7e1c6fc24..f32015a6d1f7 100644 --- a/pkg/sql/opt/exec/explain/plan_gist_factory.go +++ b/pkg/sql/opt/exec/explain/plan_gist_factory.go @@ -31,6 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/errorutil" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -369,7 +370,7 @@ func (f *PlanGistFactory) decodeBool() bool { return val != 0 } -func (f *PlanGistFactory) encodeFastIntSet(s util.FastIntSet) { +func (f *PlanGistFactory) encodeFastIntSet(s intsets.FastIntSet) { lenBefore := f.buffer.Len() if err := s.Encode(&f.buffer); err != nil { panic(err) @@ -414,7 +415,7 @@ func (f *PlanGistFactory) encodeScanParams(params exec.ScanParams) { } func (f *PlanGistFactory) decodeScanParams() exec.ScanParams { - neededCols := util.FastIntSet{} + neededCols := intsets.FastIntSet{} err := neededCols.Decode(&f.buffer) if err != nil { panic(err) diff --git a/pkg/sql/opt/exec/factory.go b/pkg/sql/opt/exec/factory.go index 989a2db9cb2f..3b2448700eab 100644 --- a/pkg/sql/opt/exec/factory.go +++ b/pkg/sql/opt/exec/factory.go @@ -22,7 +22,7 @@ import ( 
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/optional" ) @@ -125,18 +125,18 @@ const ( type TableColumnOrdinal int32 // TableColumnOrdinalSet contains a set of TableColumnOrdinal values. -type TableColumnOrdinalSet = util.FastIntSet +type TableColumnOrdinalSet = intsets.FastIntSet // NodeColumnOrdinal is the 0-based ordinal index of a column produced by a // Node. It is used when referring to a column in an input to an operator. type NodeColumnOrdinal int32 // NodeColumnOrdinalSet contains a set of NodeColumnOrdinal values. -type NodeColumnOrdinalSet = util.FastIntSet +type NodeColumnOrdinalSet = intsets.FastIntSet // CheckOrdinalSet contains the ordinal positions of a set of check constraints // taken from the opt.Table.Check collection. -type CheckOrdinalSet = util.FastIntSet +type CheckOrdinalSet = intsets.FastIntSet // AggInfo represents an aggregation (see ConstructGroupBy). 
type AggInfo struct { diff --git a/pkg/sql/opt/indexrec/BUILD.bazel b/pkg/sql/opt/indexrec/BUILD.bazel index 1d84a85f5289..a35728ade5ba 100644 --- a/pkg/sql/opt/indexrec/BUILD.bazel +++ b/pkg/sql/opt/indexrec/BUILD.bazel @@ -21,7 +21,7 @@ go_library( "//pkg/sql/opt/memo", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util", + "//pkg/util/intsets", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/sql/opt/indexrec/candidate.go b/pkg/sql/opt/indexrec/candidate.go index 8d0a59de142f..a3afc3dfbcf5 100644 --- a/pkg/sql/opt/indexrec/candidate.go +++ b/pkg/sql/opt/indexrec/candidate.go @@ -16,7 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // FindIndexCandidateSet returns a map storing potential indexes for each table @@ -281,7 +281,7 @@ func constructLeftIndexCombination( rightIndexes [][]cat.IndexColumn, outputIndexes map[cat.Table][][]cat.IndexColumn, ) { - var leftIndexColSet util.FastIntSet + var leftIndexColSet intsets.FastIntSet // Store left columns in a set for fast access. for _, leftCol := range leftIndex { leftIndexColSet.Add(int(leftCol.ColID())) diff --git a/pkg/sql/opt/indexrec/hypothetical_index.go b/pkg/sql/opt/indexrec/hypothetical_index.go index d895dfbe1b12..03745bb7ded4 100644 --- a/pkg/sql/opt/indexrec/hypothetical_index.go +++ b/pkg/sql/opt/indexrec/hypothetical_index.go @@ -17,7 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -67,7 +67,7 @@ func (hi *hypotheticalIndex) init( hi.zone = zone // Build an index column ordinal set. 
- var colsOrdSet util.FastIntSet + var colsOrdSet intsets.FastIntSet for _, col := range hi.cols { colsOrdSet.Add(col.Ordinal()) } diff --git a/pkg/sql/opt/indexrec/hypothetical_table.go b/pkg/sql/opt/indexrec/hypothetical_table.go index 4e295bd359cd..7704ef711a96 100644 --- a/pkg/sql/opt/indexrec/hypothetical_table.go +++ b/pkg/sql/opt/indexrec/hypothetical_table.go @@ -17,7 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // BuildOptAndHypTableMaps builds a HypotheticalTable for each table in @@ -79,7 +79,7 @@ func BuildOptAndHypTableMaps( type HypotheticalTable struct { cat.Table invertedCols []*cat.Column - primaryKeyColsOrdSet util.FastIntSet + primaryKeyColsOrdSet intsets.FastIntSet hypotheticalIndexes []hypotheticalIndex } diff --git a/pkg/sql/opt/indexrec/rec.go b/pkg/sql/opt/indexrec/rec.go index db150829f6f1..3c755a2fd6b0 100644 --- a/pkg/sql/opt/indexrec/rec.go +++ b/pkg/sql/opt/indexrec/rec.go @@ -18,7 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // Type represents the type of index recommendation for Rec. @@ -94,8 +94,8 @@ func (rc recCollector) addIndexRec(md *opt.Metadata, expr opt.Expr) { } // getStoredCols returns stored columns of the given existingIndex. 
-func getStoredCols(existingIndex cat.Index) util.FastIntSet { - var existingStoredOrds util.FastIntSet +func getStoredCols(existingIndex cat.Index) intsets.FastIntSet { + var existingStoredOrds intsets.FastIntSet for i, n := existingIndex.KeyColumnCount(), existingIndex.ColumnCount(); i < n; i++ { existingStoredOrds.Add(existingIndex.Column(i).Ordinal()) } @@ -104,8 +104,8 @@ func getStoredCols(existingIndex cat.Index) util.FastIntSet { // getAllCols returns columns of the given existingIndex including in the // explicit columns and STORING clause. -func getAllCols(existingIndex cat.Index) util.FastIntSet { - var existingAllOrds util.FastIntSet +func getAllCols(existingIndex cat.Index) intsets.FastIntSet { + var existingAllOrds intsets.FastIntSet for i, n := 0, existingIndex.ColumnCount(); i < n; i++ { existingAllOrds.Add(existingIndex.Column(i).Ordinal()) } @@ -136,12 +136,13 @@ func getAllCols(existingIndex cat.Index) util.FastIntSet { // candidate for existing index, and its already stored columns. If not found, // this means that there does not exist an index that satisfy the requirement to // be a candidate. So no existing indexes can be replaced, and creating a new -// index is necessary. It returns TypeCreateIndex, nil, and util.FastIntSet{}. -// If there is a candidate that stores every column from actuallyScannedCols, -// typeUseless, nil, {} is returned. Theoretically, this should never happen. +// index is necessary. It returns TypeCreateIndex, nil, and +// intsets.FastIntSet{}. If there is a candidate that stores every column from +// actuallyScannedCols, typeUseless, nil, {} is returned. Theoretically, this +// should never happen. 
func findBestExistingIndexToReplace( - table cat.Table, hypIndex *hypotheticalIndex, actuallyScannedCols util.FastIntSet, -) (Type, cat.Index, util.FastIntSet) { + table cat.Table, hypIndex *hypotheticalIndex, actuallyScannedCols intsets.FastIntSet, +) (Type, cat.Index, intsets.FastIntSet) { // To find the existing index with most columns in actuallyScannedCol, we keep // track of the best candidate for existing index and its stored columns. @@ -157,7 +158,7 @@ func findBestExistingIndexToReplace( // actuallyScannedCol). minColsDiff := actuallyScannedCols.Len() var existingIndexCandidate cat.Index - var existingIndexCandidateStoredCol util.FastIntSet + var existingIndexCandidateStoredCol intsets.FastIntSet for i, n := 0, table.IndexCount(); i < n; i++ { // Iterate through every existing index in the table. @@ -178,7 +179,7 @@ func findBestExistingIndexToReplace( // SELECT a FROM t WHERE b > 0, hypIndex(a), actuallyScannedCol b. // invisible_idx(a, b) could still be used. Creating a new index with // idx(a) STORING b is unnecessary. - return TypeAlterIndex, existingIndex, util.FastIntSet{} + return TypeAlterIndex, existingIndex, intsets.FastIntSet{} } // Skip any invisible indexes. continue @@ -206,7 +207,7 @@ func findBestExistingIndexToReplace( // scanned cols is neither included in explicit columns nor stored. // Otherwise, the optimizer should use the existing index, and no index // recommendation should be constructed. - return typeUseless, nil, util.FastIntSet{} + return typeUseless, nil, intsets.FastIntSet{} } else if existingIndexCandidate == nil || storedColsDiffSet.Len() < minColsDiff { // Otherwise, storedColsDiffSet is non-empty. The existing index is // missing some columns in actuallyScannedCol. If no candidate has been @@ -222,7 +223,7 @@ func findBestExistingIndexToReplace( if existingIndexCandidate == nil { // There doesn't exist an index with same explicit columns as hypIndex. // Recommend index creation. 
- return TypeCreateIndex, nil, util.FastIntSet{} + return TypeCreateIndex, nil, intsets.FastIntSet{} } return TypeReplaceIndex, existingIndexCandidate, existingIndexCandidateStoredCol @@ -319,8 +320,8 @@ func (rc recCollector) outputIndexRec() []Rec { // getColOrdSet returns the set of column ordinals within the given table that // are contained in cols. -func getColOrdSet(md *opt.Metadata, cols opt.ColSet, tabID opt.TableID) util.FastIntSet { - var colsOrdSet util.FastIntSet +func getColOrdSet(md *opt.Metadata, cols opt.ColSet, tabID opt.TableID) intsets.FastIntSet { + var colsOrdSet intsets.FastIntSet cols.ForEach(func(col opt.ColumnID) { table := md.ColumnMeta(col).Table // Do not add columns from other tables. @@ -400,13 +401,13 @@ type indexRecommendation struct { // newStoredColOrds stores the stored column ordinals that are scanned by the // optimizer in the expression tree passed to FindRecs. - newStoredColOrds util.FastIntSet + newStoredColOrds intsets.FastIntSet } // init initializes an index recommendation. If there is an existingIndex with // the same explicit columns, it is stored here. func (ir *indexRecommendation) init( - indexOrd int, hypTable *HypotheticalTable, scannedColOrds util.FastIntSet, + indexOrd int, hypTable *HypotheticalTable, scannedColOrds intsets.FastIntSet, ) { index := hypTable.Index(indexOrd).(*hypotheticalIndex) ir.index = index @@ -421,7 +422,7 @@ func (ir *indexRecommendation) init( // addStoredColOrds updates an index recommendation's newStoredColOrds field to // also contain the scannedColOrds columns. 
-func (ir *indexRecommendation) addStoredColOrds(scannedColOrds util.FastIntSet) { +func (ir *indexRecommendation) addStoredColOrds(scannedColOrds intsets.FastIntSet) { for i := range ir.index.storedCols { colOrd := ir.index.storedCols[i].Column.Ordinal() if scannedColOrds.Contains(colOrd) { diff --git a/pkg/sql/opt/lookupjoin/BUILD.bazel b/pkg/sql/opt/lookupjoin/BUILD.bazel index a779e814b857..45235f75c7db 100644 --- a/pkg/sql/opt/lookupjoin/BUILD.bazel +++ b/pkg/sql/opt/lookupjoin/BUILD.bazel @@ -19,7 +19,7 @@ go_library( "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util", + "//pkg/util/intsets", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/sql/opt/lookupjoin/constraint_builder.go b/pkg/sql/opt/lookupjoin/constraint_builder.go index 6df6ea84a924..12a3a70ae250 100644 --- a/pkg/sql/opt/lookupjoin/constraint_builder.go +++ b/pkg/sql/opt/lookupjoin/constraint_builder.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/props" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -216,7 +216,7 @@ func (b *ConstraintBuilder) Build( var inputProjections memo.ProjectionsExpr var lookupExpr memo.FiltersExpr var constFilters memo.FiltersExpr - var filterOrdsToExclude util.FastIntSet + var filterOrdsToExclude intsets.FastIntSet foundLookupCols := false lookupExprRequired := false remainingFilters := make(memo.FiltersExpr, 0, len(onFilters)) diff --git a/pkg/sql/opt/memo/BUILD.bazel b/pkg/sql/opt/memo/BUILD.bazel index ccfe09a53972..87efc59d26ed 100644 --- a/pkg/sql/opt/memo/BUILD.bazel +++ b/pkg/sql/opt/memo/BUILD.bazel @@ -45,10 +45,10 @@ go_library( "//pkg/sql/sem/volatility", "//pkg/sql/stats", "//pkg/sql/types", - "//pkg/util", "//pkg/util/buildutil", "//pkg/util/duration", "//pkg/util/encoding", + "//pkg/util/intsets", 
"//pkg/util/iterutil", "//pkg/util/json", "//pkg/util/log", @@ -101,14 +101,13 @@ go_test( "//pkg/sql/sem/tree/treewindow", "//pkg/sql/types", "//pkg/testutils", - "//pkg/util", "//pkg/util/duration", + "//pkg/util/intsets", "//pkg/util/leaktest", "//pkg/util/timeofday", "//pkg/util/timeutil/pgdate", "@com_github_cockroachdb_datadriven//:datadriven", "@com_github_cockroachdb_errors//:errors", - "@org_golang_x_tools//container/intsets", ], ) diff --git a/pkg/sql/opt/memo/expr.go b/pkg/sql/opt/memo/expr.go index b38b3cbd883a..77452b194cf1 100644 --- a/pkg/sql/opt/memo/expr.go +++ b/pkg/sql/opt/memo/expr.go @@ -26,7 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree/treewindow" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" ) @@ -422,7 +422,7 @@ type ScanFlags struct { // ZigzagIndexes makes planner prefer a zigzag with particular indexes. // ForceZigzag must also be true. - ZigzagIndexes util.FastIntSet + ZigzagIndexes intsets.FastIntSet } // Empty returns true if there are no flags set. diff --git a/pkg/sql/opt/memo/extract.go b/pkg/sql/opt/memo/extract.go index e042c15fd290..f0120b4752af 100644 --- a/pkg/sql/opt/memo/extract.go +++ b/pkg/sql/opt/memo/extract.go @@ -14,7 +14,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -177,7 +177,7 @@ func HasJoinCondition(leftCols, rightCols opt.ColSet, on FiltersExpr, inequality // equalities. 
func ExtractJoinConditionFilterOrds( leftCols, rightCols opt.ColSet, on FiltersExpr, inequality bool, -) (filterOrds util.FastIntSet) { +) (filterOrds intsets.FastIntSet) { var seenCols opt.ColSet for i := range on { condition := on[i].Condition diff --git a/pkg/sql/opt/memo/interner_test.go b/pkg/sql/opt/memo/interner_test.go index d07d6c1d9f66..c7fb393ccef1 100644 --- a/pkg/sql/opt/memo/interner_test.go +++ b/pkg/sql/opt/memo/interner_test.go @@ -27,10 +27,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree/treewindow" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/timeofday" "github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate" - "golang.org/x/tools/container/intsets" ) func TestInterner(t *testing.T) { @@ -379,7 +378,7 @@ func TestInterner(t *testing.T) { {hashFn: in.hasher.HashScanFlags, eqFn: in.hasher.IsScanFlagsEqual, variations: []testVariation{ // Use unnamed fields so that compilation fails if a new field is // added to ScanFlags. 
- {val1: ScanFlags{false, false, false, false, false, 0, 0, false, util.FastIntSet{}}, val2: ScanFlags{}, equal: true}, + {val1: ScanFlags{false, false, false, false, false, 0, 0, false, intsets.FastIntSet{}}, val2: ScanFlags{}, equal: true}, {val1: ScanFlags{}, val2: ScanFlags{}, equal: true}, {val1: ScanFlags{NoIndexJoin: false}, val2: ScanFlags{NoIndexJoin: true}, equal: false}, {val1: ScanFlags{NoIndexJoin: true}, val2: ScanFlags{NoIndexJoin: true}, equal: true}, @@ -463,9 +462,9 @@ func TestInterner(t *testing.T) { }}, {hashFn: in.hasher.HashSchemaTypeDeps, eqFn: in.hasher.IsSchemaTypeDepsEqual, variations: []testVariation{ - {val1: util.MakeFastIntSet(), val2: util.MakeFastIntSet(), equal: true}, - {val1: util.MakeFastIntSet(1, 2, 3), val2: util.MakeFastIntSet(3, 2, 1), equal: true}, - {val1: util.MakeFastIntSet(1, 2, 3), val2: util.MakeFastIntSet(1, 2), equal: false}, + {val1: intsets.MakeFastIntSet(), val2: intsets.MakeFastIntSet(), equal: true}, + {val1: intsets.MakeFastIntSet(1, 2, 3), val2: intsets.MakeFastIntSet(3, 2, 1), equal: true}, + {val1: intsets.MakeFastIntSet(1, 2, 3), val2: intsets.MakeFastIntSet(1, 2), equal: false}, }}, {hashFn: in.hasher.HashWindowFrame, eqFn: in.hasher.IsWindowFrameEqual, variations: []testVariation{ diff --git a/pkg/sql/opt/metadata.go b/pkg/sql/opt/metadata.go index d32dcca15ed6..5dafce1dd3dc 100644 --- a/pkg/sql/opt/metadata.go +++ b/pkg/sql/opt/metadata.go @@ -23,7 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" "github.com/lib/pq/oid" ) @@ -722,7 +722,7 @@ func (md *Metadata) AllViews() []cat.View { func (md *Metadata) getAllReferencedTables( ctx context.Context, catalog cat.Catalog, ) []cat.DataSource { - var tableSet util.FastIntSet + var tableSet intsets.FastIntSet 
var tableList []cat.DataSource var addForeignKeyReferencedTables func(tab cat.Table) addForeignKeyReferencedTables = func(tab cat.Table) { diff --git a/pkg/sql/opt/norm/BUILD.bazel b/pkg/sql/opt/norm/BUILD.bazel index 4c4dcb59f1b7..188b239df6c7 100644 --- a/pkg/sql/opt/norm/BUILD.bazel +++ b/pkg/sql/opt/norm/BUILD.bazel @@ -56,6 +56,7 @@ go_library( "//pkg/util/buildutil", "//pkg/util/encoding", "//pkg/util/errorutil", + "//pkg/util/intsets", "//pkg/util/json", "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", diff --git a/pkg/sql/opt/norm/factory.go b/pkg/sql/opt/norm/factory.go index d577c7924a2e..7e22fb0d1a60 100644 --- a/pkg/sql/opt/norm/factory.go +++ b/pkg/sql/opt/norm/factory.go @@ -22,9 +22,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/buildutil" "github.com/cockroachdb/cockroach/pkg/util/errorutil" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" ) @@ -102,7 +102,7 @@ type Factory struct { // disabledRules is a set of rules that are not allowed to run, used when // rules are disabled during testing to prevent rule cycles. - disabledRules util.FastIntSet + disabledRules intsets.FastIntSet } // maxConstructorStackDepth is the maximum allowed depth of a constructor call @@ -217,7 +217,7 @@ func (f *Factory) NotifyOnAppliedRule(appliedRule AppliedRuleFunc) { // disabled during testing. SetDisabledRules does not prevent rules from // matching - rather, it notifies the Factory that rules have been prevented // from matching using NotifyOnMatchedRule. 
-func (f *Factory) SetDisabledRules(disabledRules util.FastIntSet) { +func (f *Factory) SetDisabledRules(disabledRules intsets.FastIntSet) { f.disabledRules = disabledRules } diff --git a/pkg/sql/opt/norm/inline_funcs.go b/pkg/sql/opt/norm/inline_funcs.go index d9f7210211d7..309b84925de1 100644 --- a/pkg/sql/opt/norm/inline_funcs.go +++ b/pkg/sql/opt/norm/inline_funcs.go @@ -14,7 +14,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -317,7 +317,7 @@ func (c *CustomFuncs) extractVarEqualsConst( func (c *CustomFuncs) CanInlineConstVar(f memo.FiltersExpr) bool { // usedIndices tracks the set of filter indices we've used to infer constant // values, so we don't inline into them. - var usedIndices util.FastIntSet + var usedIndices intsets.FastIntSet // fixedCols is the set of columns that the filters restrict to be a constant // value. var fixedCols opt.ColSet @@ -350,7 +350,7 @@ func (c *CustomFuncs) CanInlineConstVar(f memo.FiltersExpr) bool { func (c *CustomFuncs) InlineConstVar(f memo.FiltersExpr) memo.FiltersExpr { // usedIndices tracks the set of filter indices we've used to infer constant // values, so we don't inline into them. - var usedIndices util.FastIntSet + var usedIndices intsets.FastIntSet // fixedCols is the set of columns that the filters restrict to be a constant // value. 
var fixedCols opt.ColSet diff --git a/pkg/sql/opt/norm/project_funcs.go b/pkg/sql/opt/norm/project_funcs.go index fc2747d549d2..840b3fc25607 100644 --- a/pkg/sql/opt/norm/project_funcs.go +++ b/pkg/sql/opt/norm/project_funcs.go @@ -17,7 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/json" "github.com/cockroachdb/errors" ) @@ -754,7 +754,7 @@ func (c *CustomFuncs) PushAssignmentCastsIntoValues( // will map a new column produced by the new values expression to their // output column. castOrds tracks the column ordinals in the values // expression to push assignment casts down to. - var castOrds util.FastIntSet + var castOrds intsets.FastIntSet newProjections := make(memo.ProjectionsExpr, 0, len(projections)) for i := range projections { col, targetType, ok := extractAssignmentCastInputColAndTargetType(projections[i].Element) diff --git a/pkg/sql/opt/norm/prune_cols_funcs.go b/pkg/sql/opt/norm/prune_cols_funcs.go index a75617b8311c..1e2f82d6c03b 100644 --- a/pkg/sql/opt/norm/prune_cols_funcs.go +++ b/pkg/sql/opt/norm/prune_cols_funcs.go @@ -16,7 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/opt/props" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // NeededGroupingCols returns the columns needed by a grouping operator's @@ -505,7 +505,7 @@ func (c *CustomFuncs) PruneWindows(needed opt.ColSet, windows memo.WindowsExpr) // are randomly disabled for testing. It is used to prevent propagating the // PruneCols property when the corresponding column-pruning normalization rule // is disabled. This prevents rule cycles during testing. 
-func DerivePruneCols(e memo.RelExpr, disabledRules util.FastIntSet) opt.ColSet { +func DerivePruneCols(e memo.RelExpr, disabledRules intsets.FastIntSet) opt.ColSet { relProps := e.Relational() if relProps.IsAvailable(props.PruneCols) { return relProps.Rule.PruneCols diff --git a/pkg/sql/opt/norm/reject_nulls_funcs.go b/pkg/sql/opt/norm/reject_nulls_funcs.go index b4607f53dffa..b7ee010cae5f 100644 --- a/pkg/sql/opt/norm/reject_nulls_funcs.go +++ b/pkg/sql/opt/norm/reject_nulls_funcs.go @@ -14,7 +14,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/opt/props" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -124,7 +124,7 @@ func (c *CustomFuncs) NullRejectProjections( // are randomly disabled for testing. It is used to prevent propagating the // RejectNullCols property when the corresponding column-pruning normalization // rule is disabled. This prevents rule cycles during testing. -func DeriveRejectNullCols(in memo.RelExpr, disabledRules util.FastIntSet) opt.ColSet { +func DeriveRejectNullCols(in memo.RelExpr, disabledRules intsets.FastIntSet) opt.ColSet { // Lazily calculate and store the RejectNullCols value. relProps := in.Relational() if relProps.IsAvailable(props.RejectNullCols) { @@ -231,7 +231,7 @@ func DeriveRejectNullCols(in memo.RelExpr, disabledRules util.FastIntSet) opt.Co // 2. The aggregate function returns null if its input is empty. And since // by #1, the presence of nulls does not alter the result, the aggregate // function would return null if its input contains only null values. 
-func deriveGroupByRejectNullCols(in memo.RelExpr, disabledRules util.FastIntSet) opt.ColSet { +func deriveGroupByRejectNullCols(in memo.RelExpr, disabledRules intsets.FastIntSet) opt.ColSet { input := in.Child(0).(memo.RelExpr) aggs := *in.Child(1).(*memo.AggregationsExpr) @@ -310,7 +310,7 @@ func (c *CustomFuncs) MakeNullRejectFilters(nullRejectCols opt.ColSet) memo.Filt // // 1. The projection "transmits" nulls - it returns NULL when one or more of // its inputs is NULL. -func deriveProjectRejectNullCols(in memo.RelExpr, disabledRules util.FastIntSet) opt.ColSet { +func deriveProjectRejectNullCols(in memo.RelExpr, disabledRules intsets.FastIntSet) opt.ColSet { rejectNullCols := DeriveRejectNullCols(in.Child(0).(memo.RelExpr), disabledRules) projections := *in.Child(1).(*memo.ProjectionsExpr) var projectionsRejectCols opt.ColSet diff --git a/pkg/sql/opt/optbuilder/BUILD.bazel b/pkg/sql/opt/optbuilder/BUILD.bazel index bfd818882d89..ede05ce7c699 100644 --- a/pkg/sql/opt/optbuilder/BUILD.bazel +++ b/pkg/sql/opt/optbuilder/BUILD.bazel @@ -86,6 +86,7 @@ go_library( "//pkg/util", "//pkg/util/errorutil", "//pkg/util/errorutil/unimplemented", + "//pkg/util/intsets", "//pkg/util/log", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", diff --git a/pkg/sql/opt/optbuilder/arbiter_set.go b/pkg/sql/opt/optbuilder/arbiter_set.go index b738b87d7c68..fc96af5d388c 100644 --- a/pkg/sql/opt/optbuilder/arbiter_set.go +++ b/pkg/sql/opt/optbuilder/arbiter_set.go @@ -13,7 +13,7 @@ package optbuilder import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -26,11 +26,11 @@ type arbiterSet struct { // indexes contains the index arbiters in the set, as ordinals into the // table's indexes. 
- indexes util.FastIntSet + indexes intsets.FastIntSet // uniqueConstraints contains the unique constraint arbiters in the set, as // ordinals into the table's unique constraints. - uniqueConstraints util.FastIntSet + uniqueConstraints intsets.FastIntSet } // makeArbiterSet returns an initialized arbiterSet. @@ -106,7 +106,7 @@ func (a *arbiterSet) ContainsUniqueConstraint(uniq cat.UniqueOrdinal) bool { // - canaryOrd is the table column ordinal of a not-null column in the // constraint's table. func (a *arbiterSet) ForEach( - f func(name string, conflictOrds util.FastIntSet, pred tree.Expr, canaryOrd int), + f func(name string, conflictOrds intsets.FastIntSet, pred tree.Expr, canaryOrd int), ) { // Call the callback for each index arbiter. a.indexes.ForEach(func(i int) { @@ -187,7 +187,7 @@ type minArbiterSet struct { // indexConflictOrdsCache caches the conflict column sets of arbiter indexes // in the set. - indexConflictOrdsCache map[cat.IndexOrdinal]util.FastIntSet + indexConflictOrdsCache map[cat.IndexOrdinal]intsets.FastIntSet } // makeMinArbiterSet returns an initialized arbiterSet. @@ -258,7 +258,7 @@ func (m *minArbiterSet) initCache() { return } // Cache each index's conflict columns. 
- m.indexConflictOrdsCache = make(map[cat.IndexOrdinal]util.FastIntSet, m.as.indexes.Len()) + m.indexConflictOrdsCache = make(map[cat.IndexOrdinal]intsets.FastIntSet, m.as.indexes.Len()) m.as.indexes.ForEach(func(i int) { index := m.as.mb.tab.Index(i) m.indexConflictOrdsCache[i] = getIndexLaxKeyOrdinals(index) diff --git a/pkg/sql/opt/optbuilder/create_function.go b/pkg/sql/opt/optbuilder/create_function.go index ced51d23fe1d..e71d9b1fad6b 100644 --- a/pkg/sql/opt/optbuilder/create_function.go +++ b/pkg/sql/opt/optbuilder/create_function.go @@ -20,8 +20,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/cast" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -53,7 +53,7 @@ func (b *Builder) buildCreateFunction(cf *tree.CreateFunction, inScope *scope) ( b.insideFuncDef = false b.trackSchemaDeps = false b.schemaDeps = nil - b.schemaTypeDeps = util.FastIntSet{} + b.schemaTypeDeps = intsets.FastIntSet{} b.qualifyDataSourceNamesInAST = false b.semaCtx.FunctionResolver = preFuncResolver @@ -164,7 +164,7 @@ func (b *Builder) buildCreateFunction(cf *tree.CreateFunction, inScope *scope) ( typeDeps.UnionWith(b.schemaTypeDeps) // Reset the tracked dependencies for next statement. b.schemaDeps = nil - b.schemaTypeDeps = util.FastIntSet{} + b.schemaTypeDeps = intsets.FastIntSet{} } // Override the function body so that references are fully qualified. 
diff --git a/pkg/sql/opt/optbuilder/create_view.go b/pkg/sql/opt/optbuilder/create_view.go index 0ca175227e6c..200c599ced56 100644 --- a/pkg/sql/opt/optbuilder/create_view.go +++ b/pkg/sql/opt/optbuilder/create_view.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -41,7 +42,7 @@ func (b *Builder) buildCreateView(cv *tree.CreateView, inScope *scope) (outScope b.insideViewDef = false b.trackSchemaDeps = false b.schemaDeps = nil - b.schemaTypeDeps = util.FastIntSet{} + b.schemaTypeDeps = intsets.FastIntSet{} b.qualifyDataSourceNamesInAST = false b.semaCtx.FunctionResolver = preFuncResolver diff --git a/pkg/sql/opt/optbuilder/fk_cascade.go b/pkg/sql/opt/optbuilder/fk_cascade.go index 3c8a5bb94169..edb0a85aaba8 100644 --- a/pkg/sql/opt/optbuilder/fk_cascade.go +++ b/pkg/sql/opt/optbuilder/fk_cascade.go @@ -22,8 +22,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/errorutil" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -207,7 +207,7 @@ func tryNewOnDeleteFastCascadeBuilder( return nil, false } - var visited util.FastIntSet + var visited intsets.FastIntSet parentTabID := parentTab.ID() childTabID := childTab.ID() diff --git a/pkg/sql/opt/optbuilder/insert.go b/pkg/sql/opt/optbuilder/insert.go index d5e3c1c2200e..479a500550ff 100644 --- a/pkg/sql/opt/optbuilder/insert.go +++ b/pkg/sql/opt/optbuilder/insert.go @@ -24,8 +24,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree/treecmp" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/types" - 
"github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -707,14 +707,14 @@ func (mb *mutationBuilder) buildInputForDoNothing(inScope *scope, onConflict *tr mb.outScope.ordering = nil // Create an anti-join for each arbiter. - mb.arbiters.ForEach(func(name string, conflictOrds util.FastIntSet, pred tree.Expr, canaryOrd int) { + mb.arbiters.ForEach(func(name string, conflictOrds intsets.FastIntSet, pred tree.Expr, canaryOrd int) { mb.buildAntiJoinForDoNothingArbiter(inScope, conflictOrds, pred) }) // Create an UpsertDistinctOn for each arbiter. This must happen after all // conflicting rows are removed with the anti-joins created above, to avoid // removing valid rows (see #59125). - mb.arbiters.ForEach(func(name string, conflictOrds util.FastIntSet, pred tree.Expr, canaryOrd int) { + mb.arbiters.ForEach(func(name string, conflictOrds intsets.FastIntSet, pred tree.Expr, canaryOrd int) { // If the arbiter has a partial predicate, project a new column that // allows the UpsertDistinctOn to only de-duplicate insert rows that // satisfy the predicate. See projectPartialArbiterDistinctColumn for @@ -760,7 +760,7 @@ func (mb *mutationBuilder) buildInputForUpsert( // Create an UpsertDistinctOn and a left-join for the single arbiter. var canaryCol *scopeColumn - mb.arbiters.ForEach(func(name string, conflictOrds util.FastIntSet, pred tree.Expr, canaryOrd int) { + mb.arbiters.ForEach(func(name string, conflictOrds intsets.FastIntSet, pred tree.Expr, canaryOrd int) { // If the arbiter has a partial predicate, project a new column that // allows the UpsertDistinctOn to only de-duplicate insert rows that // satisfy the predicate. 
See projectPartialArbiterDistinctColumn for diff --git a/pkg/sql/opt/optbuilder/join.go b/pkg/sql/opt/optbuilder/join.go index 32423e44b00e..577b8fab4a94 100644 --- a/pkg/sql/opt/optbuilder/join.go +++ b/pkg/sql/opt/optbuilder/join.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -175,8 +175,8 @@ func (b *Builder) validateJoinTableNames(leftScope, rightScope *scope) { // column that has a different table name than the previous column. This is a // fast way of reducing the set of columns that need to checked for duplicate // names by validateJoinTableNames. -func (b *Builder) findJoinColsToValidate(scope *scope) util.FastIntSet { - var ords util.FastIntSet +func (b *Builder) findJoinColsToValidate(scope *scope) intsets.FastIntSet { + var ords intsets.FastIntSet for i := range scope.cols { // Allow joins of sources that define columns with no // associated table name. At worst, the USING/NATURAL diff --git a/pkg/sql/opt/optbuilder/mutation_builder.go b/pkg/sql/opt/optbuilder/mutation_builder.go index 07173e9722b2..da26468d75ee 100644 --- a/pkg/sql/opt/optbuilder/mutation_builder.go +++ b/pkg/sql/opt/optbuilder/mutation_builder.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -1214,8 +1214,8 @@ func (mb *mutationBuilder) parseUniqueConstraintPredicateExpr(uniq cat.UniqueOrd // getIndexLaxKeyOrdinals returns the ordinals of all lax key columns in the // given index. 
A column's ordinal is the ordered position of that column in the // owning table. -func getIndexLaxKeyOrdinals(index cat.Index) util.FastIntSet { - var keyOrds util.FastIntSet +func getIndexLaxKeyOrdinals(index cat.Index) intsets.FastIntSet { + var keyOrds intsets.FastIntSet for i, n := 0, index.LaxKeyColumnCount(); i < n; i++ { keyOrds.Add(index.Column(i).Ordinal()) } @@ -1225,8 +1225,8 @@ func getIndexLaxKeyOrdinals(index cat.Index) util.FastIntSet { // getUniqueConstraintOrdinals returns the ordinals of all columns in the given // unique constraint. A column's ordinal is the ordered position of that column // in the owning table. -func getUniqueConstraintOrdinals(tab cat.Table, uc cat.UniqueConstraint) util.FastIntSet { - var ucOrds util.FastIntSet +func getUniqueConstraintOrdinals(tab cat.Table, uc cat.UniqueConstraint) intsets.FastIntSet { + var ucOrds intsets.FastIntSet for i, n := 0, uc.ColumnCount(); i < n; i++ { ucOrds.Add(uc.ColumnOrdinal(tab, i)) } @@ -1236,10 +1236,10 @@ func getUniqueConstraintOrdinals(tab cat.Table, uc cat.UniqueConstraint) util.Fa // getExplicitPrimaryKeyOrdinals returns the ordinals of the primary key // columns, excluding any implicit partitioning or hash-shard columns in the // primary index. 
-func getExplicitPrimaryKeyOrdinals(tab cat.Table) util.FastIntSet { +func getExplicitPrimaryKeyOrdinals(tab cat.Table) intsets.FastIntSet { index := tab.Index(cat.PrimaryIndex) skipCols := index.ImplicitColumnCount() - var keyOrds util.FastIntSet + var keyOrds intsets.FastIntSet for i, n := skipCols, index.LaxKeyColumnCount(); i < n; i++ { keyOrds.Add(index.Column(i).Ordinal()) } diff --git a/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go b/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go index ec4ca33835b5..6b7e14fa26a7 100644 --- a/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go +++ b/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go @@ -22,7 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -92,7 +92,7 @@ func (mb *mutationBuilder) findArbiters(onConflict *tree.OnConflict) arbiterSet )) } // We have to infer an arbiter set. - var ords util.FastIntSet + var ords intsets.FastIntSet for _, name := range onConflict.Columns { found := false for i, n := 0, mb.tab.ColumnCount(); i < n; i++ { @@ -151,7 +151,7 @@ func partialIndexArbiterError(onConflict *tree.OnConflict, tableName tree.Name) // found. // 3. Otherwise, returns all partial arbiter indexes and constraints. func (mb *mutationBuilder) inferArbitersFromConflictOrds( - conflictOrds util.FastIntSet, arbiterPredicate tree.Expr, + conflictOrds intsets.FastIntSet, arbiterPredicate tree.Expr, ) arbiterSet { // If conflictOrds is empty, then all unique indexes and unique without // index constraints are arbiters. @@ -277,7 +277,7 @@ func (mb *mutationBuilder) inferArbitersFromConflictOrds( // - pred is the partial index or constraint predicate. If the arbiter is // not a partial index or constraint, pred is nil. 
func (mb *mutationBuilder) buildAntiJoinForDoNothingArbiter( - inScope *scope, conflictOrds util.FastIntSet, pred tree.Expr, + inScope *scope, conflictOrds intsets.FastIntSet, pred tree.Expr, ) { // Build the right side of the anti-join. Use a new metadata instance // of the mutation table so that a different set of column IDs are used for @@ -359,7 +359,7 @@ func (mb *mutationBuilder) buildAntiJoinForDoNothingArbiter( // only de-duplicate insert rows that satisfy the partial index predicate. // If the arbiter is not a partial index, partialIndexDistinctCol is nil. func (mb *mutationBuilder) buildLeftJoinForUpsertArbiter( - inScope *scope, conflictOrds util.FastIntSet, pred tree.Expr, + inScope *scope, conflictOrds intsets.FastIntSet, pred tree.Expr, ) { // Build the right side of the left outer join. Use a different instance of // table metadata so that col IDs do not overlap. @@ -450,7 +450,7 @@ func (mb *mutationBuilder) buildLeftJoinForUpsertArbiter( // should trigger an error. If empty, no error is triggered. 
func (mb *mutationBuilder) buildDistinctOnForArbiter( insertColScope *scope, - conflictOrds util.FastIntSet, + conflictOrds intsets.FastIntSet, partialArbiterDistinctCol *scopeColumn, errorOnDup string, ) { diff --git a/pkg/sql/opt/optbuilder/mutation_builder_unique.go b/pkg/sql/opt/optbuilder/mutation_builder_unique.go index afbcdc610a45..aebba9c61a10 100644 --- a/pkg/sql/opt/optbuilder/mutation_builder_unique.go +++ b/pkg/sql/opt/optbuilder/mutation_builder_unique.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // UniquenessChecksForGenRandomUUIDClusterMode controls the cluster setting for @@ -195,11 +195,11 @@ type uniqueCheckHelper struct { // uniqueOrdinals are the table ordinals of the unique columns in the table // that is being mutated. They correspond 1-to-1 to the columns in the // UniqueConstraint. - uniqueOrdinals util.FastIntSet + uniqueOrdinals intsets.FastIntSet // primaryKeyOrdinals includes the ordinals from any primary key columns // that are not included in uniqueOrdinals. - primaryKeyOrdinals util.FastIntSet + primaryKeyOrdinals intsets.FastIntSet // The scope and column ordinals of the scan that will serve as the right // side of the semi join for the uniqueness checks. 
@@ -220,7 +220,7 @@ func (h *uniqueCheckHelper) init(mb *mutationBuilder, uniqueOrdinal int) bool { uniqueOrdinal: uniqueOrdinal, } - var uniqueOrds util.FastIntSet + var uniqueOrds intsets.FastIntSet for i, n := 0, h.unique.ColumnCount(); i < n; i++ { uniqueOrds.Add(h.unique.ColumnOrdinal(mb.tab, i)) } diff --git a/pkg/sql/opt/partialidx/BUILD.bazel b/pkg/sql/opt/partialidx/BUILD.bazel index 872f9113c195..f9bfb3cc9471 100644 --- a/pkg/sql/opt/partialidx/BUILD.bazel +++ b/pkg/sql/opt/partialidx/BUILD.bazel @@ -12,7 +12,7 @@ go_library( "//pkg/sql/opt/memo", "//pkg/sql/opt/norm", "//pkg/sql/sem/eval", - "//pkg/util", + "//pkg/util/intsets", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/sql/opt/partialidx/implicator.go b/pkg/sql/opt/partialidx/implicator.go index 6584dd156436..1904928a163f 100644 --- a/pkg/sql/opt/partialidx/implicator.go +++ b/pkg/sql/opt/partialidx/implicator.go @@ -16,7 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/opt/norm" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -208,7 +208,7 @@ func (im *Implicator) FiltersImplyPredicate( func (im *Implicator) filtersImplyPredicateFastPath( filters memo.FiltersExpr, pred memo.FiltersExpr, ) (remainingFilters memo.FiltersExpr, ok bool) { - var filtersToRemove util.FastIntSet + var filtersToRemove intsets.FastIntSet // For every FiltersItem in pred, search for a matching FiltersItem in // filters. 
diff --git a/pkg/sql/opt/partition/BUILD.bazel b/pkg/sql/opt/partition/BUILD.bazel index dacae7081c88..31559266b77b 100644 --- a/pkg/sql/opt/partition/BUILD.bazel +++ b/pkg/sql/opt/partition/BUILD.bazel @@ -14,7 +14,7 @@ go_library( "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util", + "//pkg/util/intsets", "@com_github_cockroachdb_errors//:errors", ], ) @@ -30,7 +30,7 @@ go_test( "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util", + "//pkg/util/intsets", "//pkg/util/leaktest", ], ) diff --git a/pkg/sql/opt/partition/locality.go b/pkg/sql/opt/partition/locality.go index bdea23cd81c4..123c1b637167 100644 --- a/pkg/sql/opt/partition/locality.go +++ b/pkg/sql/opt/partition/locality.go @@ -17,7 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -63,7 +63,7 @@ type PrefixSorter struct { // The set of ordinal numbers indexing into the Entry slice, representing // which Prefixes (partitions) are 100% local to the gateway region - LocalPartitions util.FastIntSet + LocalPartitions intsets.FastIntSet } // String returns a string representation of the PrefixSorter. @@ -150,13 +150,13 @@ func PrefixesToString(prefixes []Prefix) string { // determined. 
func HasMixOfLocalAndRemotePartitions( evalCtx *eval.Context, index cat.Index, -) (localPartitions util.FastIntSet, ok bool) { +) (localPartitions intsets.FastIntSet, ok bool) { if index.PartitionCount() < 2 { - return util.FastIntSet{}, false + return intsets.FastIntSet{}, false } var localRegion string if localRegion, ok = evalCtx.GetLocalRegion(); !ok { - return util.FastIntSet{}, false + return intsets.FastIntSet{}, false } var foundLocal, foundRemote bool for i, n := 0, index.PartitionCount(); i < n; i++ { @@ -177,7 +177,7 @@ func HasMixOfLocalAndRemotePartitions( // group of equal-length prefixes they are ordered by value. // This is the main function for building a PrefixSorter. func GetSortedPrefixes( - index cat.Index, localPartitions util.FastIntSet, evalCtx *eval.Context, + index cat.Index, localPartitions intsets.FastIntSet, evalCtx *eval.Context, ) PrefixSorter { if index == nil || index.PartitionCount() < 2 { return PrefixSorter{} diff --git a/pkg/sql/opt/partition/locality_test.go b/pkg/sql/opt/partition/locality_test.go index d5ecd98fa827..f8da0f221201 100644 --- a/pkg/sql/opt/partition/locality_test.go +++ b/pkg/sql/opt/partition/locality_test.go @@ -20,7 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/leaktest" ) @@ -93,7 +93,7 @@ func TestPrefixSorter(t *testing.T) { // only has the partitions and ps (PrefixSorter) elements populated. 
partKeys := parsePartitionKeys(&evalCtx, tc.partitionKeys) partitions := make([]testcat.Partition, len(partKeys)) - localPartitions := util.FastIntSet{} + localPartitions := intsets.FastIntSet{} for j, partitionKey := range partKeys { partitionDatums := make([]tree.Datums, 1) partitionDatums[0] = partitionKey diff --git a/pkg/sql/opt/props/func_dep.go b/pkg/sql/opt/props/func_dep.go index 8bf25a7ccd36..4ec8fc3d7c93 100644 --- a/pkg/sql/opt/props/func_dep.go +++ b/pkg/sql/opt/props/func_dep.go @@ -710,7 +710,7 @@ func (f *FuncDepSet) ComputeEquivClosure(cols opt.ColSet) opt.ColSet { // ComputeEquivClosureNoCopy is similar to ComputeEquivClosure, but computes the // closure in-place (e.g. the argument ColSet will be mutated). It should only // be used when it is ok to mutate the argument. This avoids allocations when -// columns overflow the small set of util.FastIntSet. +// columns overflow the small set of intsets.FastIntSet. func (f *FuncDepSet) ComputeEquivClosureNoCopy(cols opt.ColSet) opt.ColSet { // Don't need to get transitive closure, because equivalence closures are // already maintained for every column. diff --git a/pkg/sql/opt/schema_dependencies.go b/pkg/sql/opt/schema_dependencies.go index fe698bd59688..aed53509938d 100644 --- a/pkg/sql/opt/schema_dependencies.go +++ b/pkg/sql/opt/schema_dependencies.go @@ -14,7 +14,7 @@ import ( "sort" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // SchemaDeps contains information about the dependencies of objects in a @@ -28,7 +28,7 @@ type SchemaDep struct { // ColumnOrdinals is the set of column ordinals that are referenced for this // table. - ColumnOrdinals util.FastIntSet + ColumnOrdinals intsets.FastIntSet // ColumnIDToOrd maps a scopeColumn's ColumnID to its ColumnOrdinal. 
This // helps us add only the columns that are actually referenced by the object's @@ -44,7 +44,7 @@ type SchemaDep struct { // SchemaTypeDeps contains a set of the IDs of types that // this object depends on. -type SchemaTypeDeps = util.FastIntSet +type SchemaTypeDeps = intsets.FastIntSet // GetColumnNames returns a sorted list of the names of the column dependencies // and a boolean to determine if the dependency was a table. diff --git a/pkg/sql/opt/testutils/opttester/BUILD.bazel b/pkg/sql/opt/testutils/opttester/BUILD.bazel index 43df1cb13970..dcb6d5973158 100644 --- a/pkg/sql/opt/testutils/opttester/BUILD.bazel +++ b/pkg/sql/opt/testutils/opttester/BUILD.bazel @@ -51,6 +51,7 @@ go_library( "//pkg/testutils/sqlutils", "//pkg/util", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/stop", "//pkg/util/timeutil", diff --git a/pkg/sql/opt/testutils/opttester/opt_tester.go b/pkg/sql/opt/testutils/opttester/opt_tester.go index 9d5f47a92a57..5153c010d29e 100644 --- a/pkg/sql/opt/testutils/opttester/opt_tester.go +++ b/pkg/sql/opt/testutils/opttester/opt_tester.go @@ -65,6 +65,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/testutils/floatcmp" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -109,7 +110,7 @@ var ( ) // RuleSet efficiently stores an unordered set of RuleNames. -type RuleSet = util.FastIntSet +type RuleSet = intsets.FastIntSet // OptTester is a helper for testing the various optimizer components. It // contains the boiler-plate code for the following useful tasks: @@ -209,7 +210,7 @@ type Flags struct { // IgnoreTables specifies the subset of stats tables which should not be // outputted by the stats-quality command. 
- IgnoreTables util.FastIntSet + IgnoreTables intsets.FastIntSet // File specifies the name of the file to import. This field is only used by // the import command. @@ -863,10 +864,10 @@ func fillInLazyProps(e opt.Expr) { rel = rel.FirstExpr() // Derive columns that are candidates for pruning. - norm.DerivePruneCols(rel, util.FastIntSet{} /* disabledRules */) + norm.DerivePruneCols(rel, intsets.FastIntSet{} /* disabledRules */) // Derive columns that are candidates for null rejection. - norm.DeriveRejectNullCols(rel, util.FastIntSet{} /* disabledRules */) + norm.DeriveRejectNullCols(rel, intsets.FastIntSet{} /* disabledRules */) // Make sure the interesting orderings are calculated. ordering.DeriveInterestingOrderings(rel) @@ -1026,7 +1027,7 @@ func (f *Flags) Set(arg datadriven.CmdArg) error { f.Table = arg.Vals[0] case "ignore-tables": - var tables util.FastIntSet + var tables intsets.FastIntSet addTables := func(val string) error { table, err := strconv.Atoi(val) if err != nil { diff --git a/pkg/sql/opt/testutils/testcat/BUILD.bazel b/pkg/sql/opt/testutils/testcat/BUILD.bazel index e34670b1cfc0..bfa64a270c10 100644 --- a/pkg/sql/opt/testutils/testcat/BUILD.bazel +++ b/pkg/sql/opt/testutils/testcat/BUILD.bazel @@ -49,7 +49,7 @@ go_library( "//pkg/sql/stats", "//pkg/sql/types", "//pkg/sql/vtable", - "//pkg/util", + "//pkg/util/intsets", "//pkg/util/treeprinter", "@com_github_cockroachdb_errors//:errors", "@com_github_lib_pq//oid", diff --git a/pkg/sql/opt/testutils/testcat/create_table.go b/pkg/sql/opt/testutils/testcat/create_table.go index af537430e2f9..a7efba32a2ca 100644 --- a/pkg/sql/opt/testutils/testcat/create_table.go +++ b/pkg/sql/opt/testutils/testcat/create_table.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) type 
indexType int @@ -812,7 +812,7 @@ func (tt *Table) addIndexWithVersion( } if typ == primaryIndex { - var pkOrdinals util.FastIntSet + var pkOrdinals intsets.FastIntSet for _, c := range idx.Columns { pkOrdinals.Add(c.Ordinal()) } diff --git a/pkg/sql/opt/xform/BUILD.bazel b/pkg/sql/opt/xform/BUILD.bazel index 29ff998fdbb3..49d11e2d733f 100644 --- a/pkg/sql/opt/xform/BUILD.bazel +++ b/pkg/sql/opt/xform/BUILD.bazel @@ -49,15 +49,14 @@ go_library( "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util", "//pkg/util/buildutil", "//pkg/util/cancelchecker", "//pkg/util/errorutil", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/treeprinter", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", - "@org_golang_x_tools//container/intsets", ], ) diff --git a/pkg/sql/opt/xform/coster.go b/pkg/sql/opt/xform/coster.go index f029d437ff97..1babd94ad248 100644 --- a/pkg/sql/opt/xform/coster.go +++ b/pkg/sql/opt/xform/coster.go @@ -24,10 +24,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/props" "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" - "golang.org/x/tools/container/intsets" ) // Coster is used by the optimizer to assign a cost to a candidate expression @@ -797,7 +796,7 @@ func (c *coster) computeSelectCost(sel *memo.SelectExpr, required *physical.Requ inputRowCount = math.Min(inputRowCount, required.LimitHint/selectivity) } - filterSetup, filterPerRow := c.computeFiltersCost(sel.Filters, util.FastIntSet{}) + filterSetup, filterPerRow := c.computeFiltersCost(sel.Filters, intsets.FastIntSet{}) cost := memo.Cost(inputRowCount) * filterPerRow cost += filterSetup return cost @@ -895,7 +894,7 @@ func (c *coster) computeMergeJoinCost(join *memo.MergeJoinExpr) memo.Cost { // smaller 
right side is preferred to the symmetric join. cost := memo.Cost(0.9*leftRowCount+1.1*rightRowCount) * cpuCostFactor - filterSetup, filterPerRow := c.computeFiltersCost(join.On, util.FastIntSet{}) + filterSetup, filterPerRow := c.computeFiltersCost(join.On, intsets.FastIntSet{}) cost += filterSetup // Add the CPU cost of emitting the rows. @@ -1018,7 +1017,7 @@ func (c *coster) computeIndexLookupJoinCost( } cost := memo.Cost(lookupCount) * perLookupCost - filterSetup, filterPerRow := c.computeFiltersCost(on, util.FastIntSet{}) + filterSetup, filterPerRow := c.computeFiltersCost(on, intsets.FastIntSet{}) cost += filterSetup // Each lookup might retrieve many rows; add the IO cost of retrieving the @@ -1097,7 +1096,7 @@ func (c *coster) computeInvertedJoinCost( perLookupCost *= 5 cost := memo.Cost(lookupCount) * perLookupCost - filterSetup, filterPerRow := c.computeFiltersCost(join.On, util.FastIntSet{}) + filterSetup, filterPerRow := c.computeFiltersCost(join.On, intsets.FastIntSet{}) cost += filterSetup // Each lookup might retrieve many rows; add the IO cost of retrieving the @@ -1147,7 +1146,7 @@ func (c *coster) computeExprCost(expr opt.Expr) memo.Cost { // because they do not add to the cost. This can happen when a condition still // exists in the filters even though it is handled by the join. func (c *coster) computeFiltersCost( - filters memo.FiltersExpr, filtersToSkip util.FastIntSet, + filters memo.FiltersExpr, filtersToSkip intsets.FastIntSet, ) (setupCost, perRowCost memo.Cost) { // Add a base perRowCost so that callers do not need to have their own // base per-row cost. 
@@ -1183,7 +1182,7 @@ func (c *coster) computeZigzagJoinCost(join *memo.ZigzagJoinExpr) memo.Cost { scanCost := c.rowScanCost(join.LeftTable, join.LeftIndex, leftCols) scanCost += c.rowScanCost(join.RightTable, join.RightIndex, rightCols) - filterSetup, filterPerRow := c.computeFiltersCost(join.On, util.FastIntSet{}) + filterSetup, filterPerRow := c.computeFiltersCost(join.On, intsets.FastIntSet{}) // It is much more expensive to do a seek in zigzag join vs. lookup join // because zigzag join starts a new scan for every seek via diff --git a/pkg/sql/opt/xform/explorer.go b/pkg/sql/opt/xform/explorer.go index b70141e2af09..0d7630709adb 100644 --- a/pkg/sql/opt/xform/explorer.go +++ b/pkg/sql/opt/xform/explorer.go @@ -17,7 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/norm" "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // explorer generates alternate expressions that are logically equivalent to @@ -239,7 +239,7 @@ type exploreState struct { // fullyExploredMembers is a set of ordinal positions of members within the // memo group. Once a member expression has been fully explored, its ordinal // is added to this set. 
- fullyExploredMembers util.FastIntSet + fullyExploredMembers intsets.FastIntSet } // isMemberFullyExplored is true if the member at the given ordinal position diff --git a/pkg/sql/opt/xform/join_funcs.go b/pkg/sql/opt/xform/join_funcs.go index abaec69c58c9..cfe2315b7eab 100644 --- a/pkg/sql/opt/xform/join_funcs.go +++ b/pkg/sql/opt/xform/join_funcs.go @@ -24,7 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/props" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -1391,14 +1391,14 @@ func (c *CustomFuncs) GetLocalityOptimizedLookupJoinExprs( // that target local partitions. func (c *CustomFuncs) getLocalValues( values tree.Datums, ps partition.PrefixSorter, -) util.FastIntSet { +) intsets.FastIntSet { // The PrefixSorter has collected all the prefixes from all the different // partitions (remembering which ones came from local partitions), and has // sorted them so that longer prefixes come before shorter prefixes. For each // span in the scanConstraint, we will iterate through the list of prefixes // until we find a match, so ordering them with longer prefixes first ensures // that the correct match is found. - var localVals util.FastIntSet + var localVals intsets.FastIntSet for i, val := range values { if match, ok := constraint.FindMatchOnSingleColumn(val, ps); ok { if match.IsLocal { @@ -1413,7 +1413,7 @@ func (c *CustomFuncs) getLocalValues( // by putting the Datums at positions identified by localValOrds into the local // slice, and the remaining Datums into the remote slice. 
func (c *CustomFuncs) splitValues( - values tree.Datums, localValOrds util.FastIntSet, + values tree.Datums, localValOrds intsets.FastIntSet, ) (localVals, remoteVals tree.Datums) { localVals = make(tree.Datums, 0, localValOrds.Len()) remoteVals = make(tree.Datums, 0, len(values)-len(localVals)) diff --git a/pkg/sql/opt/xform/join_order_builder.go b/pkg/sql/opt/xform/join_order_builder.go index 7315a73ac712..26731d0b5a4d 100644 --- a/pkg/sql/opt/xform/join_order_builder.go +++ b/pkg/sql/opt/xform/join_order_builder.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/norm" "github.com/cockroachdb/cockroach/pkg/sql/opt/props" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -1767,7 +1767,7 @@ func getOpIdx(e *edge) int { } } -type edgeSet = util.FastIntSet +type edgeSet = intsets.FastIntSet type bitSet uint64 diff --git a/pkg/sql/opt/xform/optimizer.go b/pkg/sql/opt/xform/optimizer.go index 2bc4baf00282..d3408022804b 100644 --- a/pkg/sql/opt/xform/optimizer.go +++ b/pkg/sql/opt/xform/optimizer.go @@ -22,9 +22,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/ordering" "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/cancelchecker" "github.com/cockroachdb/cockroach/pkg/util/errorutil" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) @@ -40,7 +40,7 @@ type MatchedRuleFunc = norm.MatchedRuleFunc type AppliedRuleFunc = norm.AppliedRuleFunc // RuleSet efficiently stores an unordered set of RuleNames. 
-type RuleSet = util.FastIntSet +type RuleSet = intsets.FastIntSet // Optimizer transforms an input expression tree into the logically equivalent // output expression tree with the lowest possible execution cost. @@ -950,7 +950,7 @@ type groupState struct { // expression in the group that has been fully optimized for the required // properties. These never need to be recosted, no matter how many additional // optimization passes are made. - fullyOptimizedExprs util.FastIntSet + fullyOptimizedExprs intsets.FastIntSet // explore is used by the explorer to store intermediate state so that // redundant work is minimized. @@ -1004,7 +1004,7 @@ func (a *groupStateAlloc) allocate() *groupState { // disableRulesRandom disables rules with the given probability for testing. func (o *Optimizer) disableRulesRandom(probability float64) { - essentialRules := util.MakeFastIntSet( + essentialRules := intsets.MakeFastIntSet( // Needed to prevent constraint building from failing. int(opt.NormalizeInConst), // Needed when an index is forced. diff --git a/pkg/sql/opt/xform/scan_funcs.go b/pkg/sql/opt/xform/scan_funcs.go index e2610d05cc22..4284481b17c4 100644 --- a/pkg/sql/opt/xform/scan_funcs.go +++ b/pkg/sql/opt/xform/scan_funcs.go @@ -18,7 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/opt/partition" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -360,10 +360,10 @@ func (c *CustomFuncs) buildAllPartitionsConstraint( // target local partitions. func (c *CustomFuncs) getLocalSpans( scanConstraint *constraint.Constraint, ps partition.PrefixSorter, -) util.FastIntSet { +) intsets.FastIntSet { // Iterate through the spans and determine whether each one matches // with a prefix from a local partition. 
- var localSpans util.FastIntSet + var localSpans intsets.FastIntSet for i, n := 0, scanConstraint.Spans.Count(); i < n; i++ { span := scanConstraint.Spans.Get(i) if match, ok := constraint.FindMatch(span, ps); ok { @@ -379,7 +379,7 @@ func (c *CustomFuncs) getLocalSpans( // by putting the spans at positions identified by localSpanOrds into the local // constraint, and the remaining spans into the remote constraint. func (c *CustomFuncs) splitSpans( - origConstraint *constraint.Constraint, localSpanOrds util.FastIntSet, + origConstraint *constraint.Constraint, localSpanOrds intsets.FastIntSet, ) (localConstraint, remoteConstraint constraint.Constraint) { allSpansCount := origConstraint.Spans.Count() localSpansCount := localSpanOrds.Len() diff --git a/pkg/sql/opt/xform/select_funcs.go b/pkg/sql/opt/xform/select_funcs.go index 93a8d1ce68b4..5a8ce2c871e6 100644 --- a/pkg/sql/opt/xform/select_funcs.go +++ b/pkg/sql/opt/xform/select_funcs.go @@ -22,7 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/partition" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // IsLocking returns true if the ScanPrivate is configured to use a row-level @@ -1107,7 +1107,7 @@ func (c *CustomFuncs) GenerateZigzagJoins( iter2.ForEachStartingAfter(leftIndex.Ordinal(), func(rightIndex cat.Index, innerFilters memo.FiltersExpr, rightCols opt.ColSet, _ bool, _ memo.ProjectionsExpr) { // Check if we have zigzag hints. 
if scanPrivate.Flags.ForceZigzag { - indexes := util.MakeFastIntSet(leftIndex.Ordinal(), rightIndex.Ordinal()) + indexes := intsets.MakeFastIntSet(leftIndex.Ordinal(), rightIndex.Ordinal()) forceIndexes := scanPrivate.Flags.ZigzagIndexes if !forceIndexes.SubsetOf(indexes) { return diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go index 5a222d9fb506..a1c2df7674e9 100644 --- a/pkg/sql/opt_exec_factory.go +++ b/pkg/sql/opt_exec_factory.go @@ -41,9 +41,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree/treewindow" "github.com/cockroachdb/cockroach/pkg/sql/span" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/errorutil" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -297,7 +297,7 @@ func constructSimpleProjectForPlanNode( } func hasDuplicates(cols []exec.NodeColumnOrdinal) bool { - var set util.FastIntSet + var set intsets.FastIntSet for _, c := range cols { if set.Contains(int(c)) { return true diff --git a/pkg/sql/physicalplan/BUILD.bazel b/pkg/sql/physicalplan/BUILD.bazel index 3a6b97df6039..c00392027ddc 100644 --- a/pkg/sql/physicalplan/BUILD.bazel +++ b/pkg/sql/physicalplan/BUILD.bazel @@ -29,8 +29,8 @@ go_library( "//pkg/sql/sem/tree", "//pkg/sql/sem/tree/treebin", "//pkg/sql/types", - "//pkg/util", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/randutil", "//pkg/util/uuid", diff --git a/pkg/sql/physicalplan/physical_plan.go b/pkg/sql/physicalplan/physical_plan.go index daacd46bd9c0..a2cbb1700eb9 100644 --- a/pkg/sql/physicalplan/physical_plan.go +++ b/pkg/sql/physicalplan/physical_plan.go @@ -25,7 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + 
"github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" ) @@ -1149,7 +1149,7 @@ func (p *PhysicalPlan) EnsureSingleStreamPerNode( forceSerialization bool, post execinfrapb.PostProcessSpec, ) { // Fast path - check if we need to do anything. - var nodes util.FastIntSet + var nodes intsets.FastIntSet var foundDuplicates bool for _, pIdx := range p.ResultRouters { proc := &p.Processors[pIdx] diff --git a/pkg/sql/row/BUILD.bazel b/pkg/sql/row/BUILD.bazel index 24fe1a0d03ea..8a2d6bcef704 100644 --- a/pkg/sql/row/BUILD.bazel +++ b/pkg/sql/row/BUILD.bazel @@ -68,6 +68,7 @@ go_library( "//pkg/util/admission/admissionpb", "//pkg/util/encoding", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/log/eventpb", "//pkg/util/log/logpb", diff --git a/pkg/sql/row/fetcher.go b/pkg/sql/row/fetcher.go index a9f178a943a7..00925741ff39 100644 --- a/pkg/sql/row/fetcher.go +++ b/pkg/sql/row/fetcher.go @@ -32,9 +32,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -99,7 +99,7 @@ type tableInfo struct { // The set of indexes into spec.FetchedColumns that are required for columns // in the value part. - neededValueColsByIdx util.FastIntSet + neededValueColsByIdx intsets.FastIntSet // The number of needed columns from the value part of the row. 
Once we've // seen this number of value columns for a particular row, we can stop diff --git a/pkg/sql/row/helper.go b/pkg/sql/row/helper.go index ebaaa1ab57d3..3a70f1bdaf0d 100644 --- a/pkg/sql/row/helper.go +++ b/pkg/sql/row/helper.go @@ -28,8 +28,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/rowenc/rowencpb" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" "github.com/cockroachdb/cockroach/pkg/util/log/logpb" @@ -152,7 +152,7 @@ func newRowHelper( func (rh *rowHelper) encodeIndexes( colIDtoRowIndex catalog.TableColMap, values []tree.Datum, - ignoreIndexes util.FastIntSet, + ignoreIndexes intsets.FastIntSet, includeEmpty bool, ) ( primaryIndexKey []byte, @@ -203,7 +203,7 @@ func (rh *rowHelper) encodePrimaryIndex( func (rh *rowHelper) encodeSecondaryIndexes( colIDtoRowIndex catalog.TableColMap, values []tree.Datum, - ignoreIndexes util.FastIntSet, + ignoreIndexes intsets.FastIntSet, includeEmpty bool, ) (secondaryIndexEntries map[catalog.Index][]rowenc.IndexEntry, err error) { diff --git a/pkg/sql/row/partial_index.go b/pkg/sql/row/partial_index.go index 597046e7e6d1..0e8425121c50 100644 --- a/pkg/sql/row/partial_index.go +++ b/pkg/sql/row/partial_index.go @@ -13,7 +13,7 @@ package row import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // PartialIndexUpdateHelper keeps track of partial indexes that should be not @@ -24,10 +24,10 @@ import ( // index. type PartialIndexUpdateHelper struct { // IgnoreForPut is a set of index IDs to ignore for Put operations. 
- IgnoreForPut util.FastIntSet + IgnoreForPut intsets.FastIntSet // IgnoreForDel is a set of index IDs to ignore for Del operations. - IgnoreForDel util.FastIntSet + IgnoreForDel intsets.FastIntSet } // Init initializes a PartialIndexUpdateHelper to track partial index IDs that diff --git a/pkg/sql/row/row_converter.go b/pkg/sql/row/row_converter.go index 9ef60f75f0bb..83ce6cab8d6c 100644 --- a/pkg/sql/row/row_converter.go +++ b/pkg/sql/row/row_converter.go @@ -30,6 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -205,7 +206,7 @@ type DatumRowConverter struct { // Tracks which column indices in the set of visible columns are part of the // user specified target columns. This can be used before populating Datums // to filter out unwanted column data. - TargetColOrds util.FastIntSet + TargetColOrds intsets.FastIntSet // The rest of these are derived from tableDesc, just cached here. 
ri Inserter diff --git a/pkg/sql/rowenc/BUILD.bazel b/pkg/sql/rowenc/BUILD.bazel index 45bc3a7af5fc..0c5a7092638d 100644 --- a/pkg/sql/rowenc/BUILD.bazel +++ b/pkg/sql/rowenc/BUILD.bazel @@ -31,9 +31,9 @@ go_library( "//pkg/sql/sem/tree", "//pkg/sql/sqlerrors", "//pkg/sql/types", - "//pkg/util", "//pkg/util/buildutil", "//pkg/util/encoding", + "//pkg/util/intsets", "//pkg/util/json", "//pkg/util/mon", "//pkg/util/protoutil", diff --git a/pkg/sql/rowenc/index_encoding.go b/pkg/sql/rowenc/index_encoding.go index c327d53d7ded..a223b00bc30b 100644 --- a/pkg/sql/rowenc/index_encoding.go +++ b/pkg/sql/rowenc/index_encoding.go @@ -31,8 +31,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/json" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/protoutil" @@ -162,7 +162,7 @@ func MakeSpanFromEncDatums( // retrieve neededCols for the specified table and index. The returned descpb.FamilyIDs // are in sorted order. func NeededColumnFamilyIDs( - neededColOrdinals util.FastIntSet, table catalog.TableDescriptor, index catalog.Index, + neededColOrdinals intsets.FastIntSet, table catalog.TableDescriptor, index catalog.Index, ) []descpb.FamilyID { if table.NumFamilies() == 1 { return []descpb.FamilyID{table.GetFamilies()[0].ID} @@ -171,9 +171,9 @@ func NeededColumnFamilyIDs( // Build some necessary data structures for column metadata. 
columns := table.DeletableColumns() colIdxMap := catalog.ColumnIDToOrdinalMap(columns) - var indexedCols util.FastIntSet - var compositeCols util.FastIntSet - var extraCols util.FastIntSet + var indexedCols intsets.FastIntSet + var compositeCols intsets.FastIntSet + var extraCols intsets.FastIntSet for i := 0; i < index.NumKeyColumns(); i++ { columnID := index.GetKeyColumnID(i) columnOrdinal := colIdxMap.GetDefault(columnID) diff --git a/pkg/sql/rowexec/BUILD.bazel b/pkg/sql/rowexec/BUILD.bazel index e344622fca98..cb6acfc80b6d 100644 --- a/pkg/sql/rowexec/BUILD.bazel +++ b/pkg/sql/rowexec/BUILD.bazel @@ -92,6 +92,7 @@ go_library( "//pkg/util/ctxgroup", "//pkg/util/encoding", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/mon", "//pkg/util/optional", @@ -189,9 +190,9 @@ go_test( "//pkg/testutils/skip", "//pkg/testutils/sqlutils", "//pkg/testutils/testcluster", - "//pkg/util", "//pkg/util/encoding", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/json", "//pkg/util/leaktest", "//pkg/util/log", diff --git a/pkg/sql/rowexec/aggregator_test.go b/pkg/sql/rowexec/aggregator_test.go index 0ddebfc351aa..7c4a0e6269cf 100644 --- a/pkg/sql/rowexec/aggregator_test.go +++ b/pkg/sql/rowexec/aggregator_test.go @@ -23,7 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" ) @@ -632,7 +632,7 @@ func makeGroupedIntRows(groupSize, numCols int, groupedCols []int) rowenc.EncDat numRows := intPow(groupSize, len(groupedCols)+1) rows := make(rowenc.EncDatumRows, numRows) - groupColSet := util.MakeFastIntSet(groupedCols...) + groupColSet := intsets.MakeFastIntSet(groupedCols...) 
getGroupedColVal := func(rowIdx, colIdx int) int { rank := -1 for i, c := range groupedCols { diff --git a/pkg/sql/rowexec/joinreader_test.go b/pkg/sql/rowexec/joinreader_test.go index a165a9891385..c853d1b8909b 100644 --- a/pkg/sql/rowexec/joinreader_test.go +++ b/pkg/sql/rowexec/joinreader_test.go @@ -44,7 +44,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" @@ -1122,7 +1122,7 @@ func TestJoinReader(t *testing.T) { index := td.ActiveIndexes()[c.indexIdx] var fetchColIDs []descpb.ColumnID - var neededOrds util.FastIntSet + var neededOrds intsets.FastIntSet for _, ord := range c.fetchCols { neededOrds.Add(int(ord)) fetchColIDs = append(fetchColIDs, td.PublicColumns()[ord].GetID()) @@ -1588,7 +1588,7 @@ func TestIndexJoiner(t *testing.T) { ); err != nil { t.Fatal(err) } - splitter := span.MakeSplitter(c.desc, c.desc.GetPrimaryIndex(), util.MakeFastIntSet(0, 1, 2, 3)) + splitter := span.MakeSplitter(c.desc, c.desc.GetPrimaryIndex(), intsets.MakeFastIntSet(0, 1, 2, 3)) spec := execinfrapb.JoinReaderSpec{ FetchSpec: fetchSpec, diff --git a/pkg/sql/rowexec/mergejoiner.go b/pkg/sql/rowexec/mergejoiner.go index 40e4cf3b15df..d2689ae1c81b 100644 --- a/pkg/sql/rowexec/mergejoiner.go +++ b/pkg/sql/rowexec/mergejoiner.go @@ -20,8 +20,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/execstats" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/cancelchecker" + "github.com/cockroachdb/cockroach/pkg/util/intsets" 
"github.com/cockroachdb/cockroach/pkg/util/optional" "github.com/cockroachdb/errors" ) @@ -40,7 +40,7 @@ type mergeJoiner struct { leftIdx, rightIdx int trackMatchedRight bool emitUnmatchedRight bool - matchedRight util.FastIntSet + matchedRight intsets.FastIntSet matchedRightCount int streamMerger streamMerger @@ -246,7 +246,7 @@ func (m *mergeJoiner) nextRow() (rowenc.EncDatumRow, *execinfrapb.ProducerMetada m.emitUnmatchedRight = shouldEmitUnmatchedRow(rightSide, m.joinType) m.leftIdx, m.rightIdx = 0, 0 if m.trackMatchedRight { - m.matchedRight = util.FastIntSet{} + m.matchedRight = intsets.FastIntSet{} } } } diff --git a/pkg/sql/rowexec/sample_aggregator.go b/pkg/sql/rowexec/sample_aggregator.go index 7111153f431e..0e88b6fb1660 100644 --- a/pkg/sql/rowexec/sample_aggregator.go +++ b/pkg/sql/rowexec/sample_aggregator.go @@ -31,6 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -134,7 +135,7 @@ func newSampleAggregator( invSketch: make(map[uint32]*sketchInfo, len(spec.InvertedSketches)), } - var sampleCols util.FastIntSet + var sampleCols intsets.FastIntSet for i := range spec.Sketches { s.sketches[i] = sketchInfo{ spec: spec.Sketches[i], @@ -156,7 +157,7 @@ func newSampleAggregator( // The datums are converted to their inverted index bytes and sent as a // single DBytes column. We do not use DEncodedKey here because it would // introduce backward compatibility complications. 
- var srCols util.FastIntSet + var srCols intsets.FastIntSet srCols.Add(0) sr.Init(int(spec.SampleSize), int(spec.MinSampleSize), bytesRowType, &s.memAcc, srCols) col := spec.InvertedSketches[i].Columns[0] diff --git a/pkg/sql/rowexec/sampler.go b/pkg/sql/rowexec/sampler.go index a7840585bc18..f88f01f1c5d5 100644 --- a/pkg/sql/rowexec/sampler.go +++ b/pkg/sql/rowexec/sampler.go @@ -26,7 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/randutil" @@ -129,7 +129,7 @@ func newSamplerProcessor( } inTypes := input.OutputTypes() - var sampleCols util.FastIntSet + var sampleCols intsets.FastIntSet for i := range spec.Sketches { s.sketches[i] = sketchInfo{ spec: spec.Sketches[i], @@ -145,7 +145,7 @@ func newSamplerProcessor( var sr stats.SampleReservoir // The datums are converted to their inverted index bytes and // sent as single DBytes column. 
- var srCols util.FastIntSet + var srCols intsets.FastIntSet srCols.Add(0) sr.Init(int(spec.SampleSize), int(spec.MinSampleSize), bytesRowType, &s.memAcc, srCols) col := spec.InvertedSketches[i].Columns[0] diff --git a/pkg/sql/schemachanger/rel/BUILD.bazel b/pkg/sql/schemachanger/rel/BUILD.bazel index 300f5b5118c3..24e31f102c6b 100644 --- a/pkg/sql/schemachanger/rel/BUILD.bazel +++ b/pkg/sql/schemachanger/rel/BUILD.bazel @@ -35,6 +35,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/util", + "//pkg/util/intsets", "//pkg/util/iterutil", "//pkg/util/syncutil", "//pkg/util/uuid", diff --git a/pkg/sql/schemachanger/rel/query.go b/pkg/sql/schemachanger/rel/query.go index ee4a7dcf53ec..00eefe89d945 100644 --- a/pkg/sql/schemachanger/rel/query.go +++ b/pkg/sql/schemachanger/rel/query.go @@ -14,6 +14,7 @@ import ( "sort" "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/errors" ) @@ -126,7 +127,7 @@ func (q *Query) putEvalContext(ec *evalContext) { // Entities returns the entities in the query in their join order. // This method exists primarily for introspection. 
func (q *Query) Entities() []Var { - var entitySlots util.FastIntSet + var entitySlots intsets.FastIntSet for _, slotIdx := range q.entities { entitySlots.Add(int(slotIdx)) } diff --git a/pkg/sql/schemachanger/rel/query_data.go b/pkg/sql/schemachanger/rel/query_data.go index 385afb823b8b..5d56cb22add6 100644 --- a/pkg/sql/schemachanger/rel/query_data.go +++ b/pkg/sql/schemachanger/rel/query_data.go @@ -13,7 +13,7 @@ package rel import ( "reflect" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) type slotIdx uint16 @@ -129,7 +129,7 @@ func (s *slot) reset() { } func maybeSet( - slots []slot, idx slotIdx, tv typedValue, set *util.FastIntSet, + slots []slot, idx slotIdx, tv typedValue, set *intsets.FastIntSet, ) (foundContradiction bool) { s := &slots[idx] diff --git a/pkg/sql/schemachanger/rel/query_eval.go b/pkg/sql/schemachanger/rel/query_eval.go index 6a4aedac3338..154fbac15f82 100644 --- a/pkg/sql/schemachanger/rel/query_eval.go +++ b/pkg/sql/schemachanger/rel/query_eval.go @@ -14,6 +14,7 @@ import ( "reflect" "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -154,7 +155,7 @@ func (ec *evalContext) iterateNext() error { func (ec *evalContext) visit(e entity) error { // Keep track of which slots were filled as part of this step in the // evaluation and then unset them when we pop out of this stack frame. - var slotsFilled util.FastIntSet + var slotsFilled intsets.FastIntSet defer func() { slotsFilled.ForEach(func(i int) { ec.slots[i].reset() }) }() @@ -311,7 +312,7 @@ func (ec *evalContext) buildWhere() ( } // unify is like unifyReturningContradiction but it does not return the fact. 
-func unify(facts []fact, s []slot, slotsFilled *util.FastIntSet) (contradictionFound bool) { +func unify(facts []fact, s []slot, slotsFilled *intsets.FastIntSet) (contradictionFound bool) { contradictionFound, _ = unifyReturningContradiction(facts, s, slotsFilled) return contradictionFound } @@ -321,7 +322,7 @@ func unify(facts []fact, s []slot, slotsFilled *util.FastIntSet) (contradictionF // contradiction is returned. Any slots set in the process of unification // are recorded into the set. func unifyReturningContradiction( - facts []fact, s []slot, slotsFilled *util.FastIntSet, + facts []fact, s []slot, slotsFilled *intsets.FastIntSet, ) (contradictionFound bool, contradicted fact) { // TODO(ajwerner): As we unify we could determine that some facts are no // longer relevant. When we do that we could move them to the front and keep diff --git a/pkg/sql/schemachanger/rel/reltest/BUILD.bazel b/pkg/sql/schemachanger/rel/reltest/BUILD.bazel index 30464e0a18f9..c1f4f92dd48a 100644 --- a/pkg/sql/schemachanger/rel/reltest/BUILD.bazel +++ b/pkg/sql/schemachanger/rel/reltest/BUILD.bazel @@ -15,7 +15,7 @@ go_library( deps = [ "//pkg/sql/schemachanger/rel", "//pkg/testutils", - "//pkg/util", + "//pkg/util/intsets", "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", "@in_gopkg_yaml_v3//:yaml_v3", diff --git a/pkg/sql/schemachanger/rel/reltest/database.go b/pkg/sql/schemachanger/rel/reltest/database.go index fb004db399c7..6585e33106ed 100644 --- a/pkg/sql/schemachanger/rel/reltest/database.go +++ b/pkg/sql/schemachanger/rel/reltest/database.go @@ -18,7 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel" "github.com/cockroachdb/cockroach/pkg/testutils" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) @@ -98,13 +98,13 @@ func (qc QueryTest) run(t *testing.T, indexes int, db *rel.Database) { results = 
append(results, cur) return nil }); testutils.IsError(err, `failed to find index to satisfy query`) { - if util.MakeFastIntSet(qc.UnsatisfiableIndexes...).Contains(indexes) { + if intsets.MakeFastIntSet(qc.UnsatisfiableIndexes...).Contains(indexes) { return } t.Fatalf("expected to succeed with indexes %d: %v", indexes, err) } else if err != nil { t.Fatal(err) - } else if util.MakeFastIntSet(qc.UnsatisfiableIndexes...).Contains(indexes) { + } else if intsets.MakeFastIntSet(qc.UnsatisfiableIndexes...).Contains(indexes) { t.Fatalf("expected to fail with indexes %d", indexes) } expResults := append(qc.Results[:0:0], qc.Results...) diff --git a/pkg/sql/schemachanger/scexec/BUILD.bazel b/pkg/sql/schemachanger/scexec/BUILD.bazel index 311f3b9cb4ec..d5f447c3a0e4 100644 --- a/pkg/sql/schemachanger/scexec/BUILD.bazel +++ b/pkg/sql/schemachanger/scexec/BUILD.bazel @@ -36,9 +36,9 @@ go_library( "//pkg/sql/schemachanger/scplan", "//pkg/sql/sem/catid", "//pkg/sql/sessiondata", - "//pkg/util", "//pkg/util/ctxgroup", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/log/eventpb", "//pkg/util/log/logpb", diff --git a/pkg/sql/schemachanger/scexec/backfiller/BUILD.bazel b/pkg/sql/schemachanger/scexec/backfiller/BUILD.bazel index 3cc6cf7463ff..7fe1407378e6 100644 --- a/pkg/sql/schemachanger/scexec/backfiller/BUILD.bazel +++ b/pkg/sql/schemachanger/scexec/backfiller/BUILD.bazel @@ -20,7 +20,7 @@ go_library( "//pkg/sql/backfill", "//pkg/sql/catalog/descpb", "//pkg/sql/schemachanger/scexec", - "//pkg/util", + "//pkg/util/intsets", "//pkg/util/syncutil", "//pkg/util/timeutil", "@com_github_cockroachdb_errors//:errors", diff --git a/pkg/sql/schemachanger/scexec/backfiller/tracker.go b/pkg/sql/schemachanger/scexec/backfiller/tracker.go index cd2353c002e3..dad16e59bf81 100644 --- a/pkg/sql/schemachanger/scexec/backfiller/tracker.go +++ b/pkg/sql/schemachanger/scexec/backfiller/tracker.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/syncutil" ) @@ -438,7 +438,7 @@ type progressReportFlags struct { } func sameIndexIDSet(ds []descpb.IndexID, ds2 []descpb.IndexID) bool { - toSet := func(ids []descpb.IndexID) (s util.FastIntSet) { + toSet := func(ids []descpb.IndexID) (s intsets.FastIntSet) { for _, id := range ids { s.Add(int(id)) } @@ -465,7 +465,7 @@ type mergeKey struct { } func toMergeKey(m scexec.Merge) mergeKey { - var ids util.FastIntSet + var ids intsets.FastIntSet for _, id := range m.SourceIndexIDs { ids.Add(int(id)) } diff --git a/pkg/sql/schemachanger/scexec/gc_jobs.go b/pkg/sql/schemachanger/scexec/gc_jobs.go index b5a2abe3095f..196039c4e753 100644 --- a/pkg/sql/schemachanger/scexec/gc_jobs.go +++ b/pkg/sql/schemachanger/scexec/gc_jobs.go @@ -20,7 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/timeutil" ) @@ -82,7 +82,7 @@ func (gj gcJobs) makeRecords( ) (dbZoneConfigsToRemove catalog.DescriptorIDSet, gcJobRecords []jobs.Record) { type stmts struct { s []scop.StatementForDropJob - set util.FastIntSet + set intsets.FastIntSet } addStmt := func(s *stmts, stmt scop.StatementForDropJob) { if id := int(stmt.StatementID); !s.set.Contains(id) { diff --git a/pkg/sql/sem/builtins/BUILD.bazel b/pkg/sql/sem/builtins/BUILD.bazel index 296d4ed58596..f37c6a4dbb01 100644 --- a/pkg/sql/sem/builtins/BUILD.bazel +++ b/pkg/sql/sem/builtins/BUILD.bazel @@ -103,6 +103,7 @@ go_library( "//pkg/util/fuzzystrmatch", "//pkg/util/hlc", "//pkg/util/humanizeutil", + 
"//pkg/util/intsets", "//pkg/util/ipaddr", "//pkg/util/json", "//pkg/util/log", diff --git a/pkg/sql/sem/builtins/geo_builtins.go b/pkg/sql/sem/builtins/geo_builtins.go index 4f04ffef4641..2623a202c558 100644 --- a/pkg/sql/sem/builtins/geo_builtins.go +++ b/pkg/sql/sem/builtins/geo_builtins.go @@ -37,8 +37,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/storageparam" "github.com/cockroachdb/cockroach/pkg/sql/storageparam/indexstorageparam" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/json" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" @@ -7425,7 +7425,7 @@ func appendStrArgOverloadForGeometryArgOverloads(def builtinDefinition) builtinD } // Find all argument indexes that have the Geometry type. - var argsToCast util.FastIntSet + var argsToCast intsets.FastIntSet for i, paramType := range paramTypes { if paramType.Typ.Equal(types.Geometry) { argsToCast.Add(i) diff --git a/pkg/sql/sem/catid/BUILD.bazel b/pkg/sql/sem/catid/BUILD.bazel index e9bf1880b56f..1cbbca3f29b6 100644 --- a/pkg/sql/sem/catid/BUILD.bazel +++ b/pkg/sql/sem/catid/BUILD.bazel @@ -11,7 +11,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/sql/oidext", - "//pkg/util", + "//pkg/util/intsets", "@com_github_cockroachdb_errors//:errors", "@com_github_lib_pq//oid", ], diff --git a/pkg/sql/sem/catid/index_id_set.go b/pkg/sql/sem/catid/index_id_set.go index a3c140130b63..9b9e3465ae35 100644 --- a/pkg/sql/sem/catid/index_id_set.go +++ b/pkg/sql/sem/catid/index_id_set.go @@ -10,11 +10,11 @@ package catid -import "github.com/cockroachdb/cockroach/pkg/util" +import "github.com/cockroachdb/cockroach/pkg/util/intsets" // IndexSet efficiently stores an unordered set of index ids. 
type IndexSet struct { - set util.FastIntSet + set intsets.FastIntSet } // MakeIndexIDSet returns a set initialized with the given values. diff --git a/pkg/sql/sem/tree/BUILD.bazel b/pkg/sql/sem/tree/BUILD.bazel index 0405257414fe..91c1eb48930e 100644 --- a/pkg/sql/sem/tree/BUILD.bazel +++ b/pkg/sql/sem/tree/BUILD.bazel @@ -143,6 +143,7 @@ go_library( "//pkg/util/duration", "//pkg/util/encoding", "//pkg/util/errorutil/unimplemented", + "//pkg/util/intsets", "//pkg/util/ipaddr", "//pkg/util/iterutil", "//pkg/util/json", diff --git a/pkg/sql/sem/tree/constant.go b/pkg/sql/sem/tree/constant.go index ff0a3c322253..d185123d781b 100644 --- a/pkg/sql/sem/tree/constant.go +++ b/pkg/sql/sem/tree/constant.go @@ -22,7 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" "github.com/lib/pq/oid" ) @@ -441,7 +441,7 @@ func intersectTypeSlices(xs, ys []*types.T) (out []*types.T) { // The function takes a slice of Exprs and indexes, but expects all the indexed // Exprs to wrap a Constant. The reason it does no take a slice of Constants // instead is to avoid forcing callers to allocate separate slices of Constant. 
-func commonConstantType(vals []Expr, idxs util.FastIntSet) (*types.T, bool) { +func commonConstantType(vals []Expr, idxs intsets.FastIntSet) (*types.T, bool) { var candidates []*types.T for i, ok := idxs.Next(0); ok; i, ok = idxs.Next(i + 1) { diff --git a/pkg/sql/sem/tree/overload.go b/pkg/sql/sem/tree/overload.go index 8a7316a8700e..8e9106498d4c 100644 --- a/pkg/sql/sem/tree/overload.go +++ b/pkg/sql/sem/tree/overload.go @@ -22,7 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/volatility" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" "github.com/lib/pq/oid" @@ -564,9 +564,9 @@ type overloadTypeChecker struct { overloadIdxs []uint8 // index into overloads exprs []Expr typedExprs []TypedExpr - resolvableIdxs util.FastIntSet // index into exprs/typedExprs - constIdxs util.FastIntSet // index into exprs/typedExprs - placeholderIdxs util.FastIntSet // index into exprs/typedExprs + resolvableIdxs intsets.FastIntSet // index into exprs/typedExprs + constIdxs intsets.FastIntSet // index into exprs/typedExprs + placeholderIdxs intsets.FastIntSet // index into exprs/typedExprs overloadsIdxArr [16]uint8 } @@ -617,9 +617,9 @@ func (s *overloadTypeChecker) release() { } s.typedExprs = s.typedExprs[:0] s.overloadIdxs = s.overloadIdxs[:0] - s.resolvableIdxs = util.FastIntSet{} - s.constIdxs = util.FastIntSet{} - s.placeholderIdxs = util.FastIntSet{} + s.resolvableIdxs = intsets.FastIntSet{} + s.constIdxs = intsets.FastIntSet{} + s.placeholderIdxs = intsets.FastIntSet{} overloadTypeCheckerPool.Put(s) } @@ -720,7 +720,7 @@ func (s *overloadTypeChecker) typeCheckOverloadedExprs( // Filter out overloads on resolved types. This includes resolved placeholders // and any other resolvable exprs. 
- var typeableIdxs = util.FastIntSet{} + var typeableIdxs = intsets.FastIntSet{} for i, ok := s.resolvableIdxs.Next(0); ok; i, ok = s.resolvableIdxs.Next(i + 1) { typeableIdxs.Add(i) } diff --git a/pkg/sql/sem/tree/type_check.go b/pkg/sql/sem/tree/type_check.go index a461ad10994e..53a65bf9b7ed 100644 --- a/pkg/sql/sem/tree/type_check.go +++ b/pkg/sql/sem/tree/type_check.go @@ -21,9 +21,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree/treecmp" "github.com/cockroachdb/cockroach/pkg/sql/sem/volatility" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/duration" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" @@ -1085,7 +1085,7 @@ func (expr *FuncExpr) TypeCheck( return nil, pgerror.Wrapf(err, pgcode.InvalidParameterValue, "%s()", def.Name) } - var calledOnNullInputFns, notCalledOnNullInputFns util.FastIntSet + var calledOnNullInputFns, notCalledOnNullInputFns intsets.FastIntSet for _, idx := range s.overloadIdxs { if def.Overloads[idx].CalledOnNullInput { calledOnNullInputFns.Add(int(idx)) @@ -1107,7 +1107,7 @@ func (expr *FuncExpr) TypeCheck( if funcCls == AggregateClass { for i := range s.typedExprs { if s.typedExprs[i].ResolvedType().Family() == types.UnknownFamily { - var filtered util.FastIntSet + var filtered intsets.FastIntSet for j, ok := notCalledOnNullInputFns.Next(0); ok; j, ok = notCalledOnNullInputFns.Next(j + 1) { if def.Overloads[j].params().GetAt(i).Equivalent(types.String) { filtered.Add(j) @@ -2356,9 +2356,9 @@ type typeCheckExprsState struct { exprs []Expr typedExprs []TypedExpr - constIdxs util.FastIntSet // index into exprs/typedExprs - placeholderIdxs util.FastIntSet // index into exprs/typedExprs - resolvableIdxs util.FastIntSet // index into 
exprs/typedExprs + constIdxs intsets.FastIntSet // index into exprs/typedExprs + placeholderIdxs intsets.FastIntSet // index into exprs/typedExprs + resolvableIdxs intsets.FastIntSet // index into exprs/typedExprs } // typeCheckSameTypedExprs type checks a list of expressions, asserting that all @@ -2588,7 +2588,11 @@ func typeCheckConstsAndPlaceholdersWithDesired( // - All other Exprs func typeCheckSplitExprs( exprs []Expr, -) (constIdxs util.FastIntSet, placeholderIdxs util.FastIntSet, resolvableIdxs util.FastIntSet) { +) ( + constIdxs intsets.FastIntSet, + placeholderIdxs intsets.FastIntSet, + resolvableIdxs intsets.FastIntSet, +) { for i, expr := range exprs { switch { case isConstant(expr): diff --git a/pkg/sql/span/BUILD.bazel b/pkg/sql/span/BUILD.bazel index f786e403e737..875a7a473306 100644 --- a/pkg/sql/span/BUILD.bazel +++ b/pkg/sql/span/BUILD.bazel @@ -23,8 +23,8 @@ go_library( "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util", "//pkg/util/encoding", + "//pkg/util/intsets", "@com_github_cockroachdb_errors//:errors", ], ) @@ -47,7 +47,7 @@ go_test( "//pkg/sql/catalog/systemschema", "//pkg/sql/tests", "//pkg/testutils/serverutils", - "//pkg/util", + "//pkg/util/intsets", "//pkg/util/leaktest", "//pkg/util/log", "//pkg/util/randutil", diff --git a/pkg/sql/span/span_splitter.go b/pkg/sql/span/span_splitter.go index effb5cdc4932..3adad02531d4 100644 --- a/pkg/sql/span/span_splitter.go +++ b/pkg/sql/span/span_splitter.go @@ -16,7 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/errors" ) @@ -46,7 +46,7 @@ func NoopSplitter() Splitter { // the NoopSplitter (which never splits). // Note: this splitter should **not** be used for deletes. 
func MakeSplitter( - table catalog.TableDescriptor, index catalog.Index, neededColOrdinals util.FastIntSet, + table catalog.TableDescriptor, index catalog.Index, neededColOrdinals intsets.FastIntSet, ) Splitter { return MakeSplitterForDelete(table, index, neededColOrdinals, false /* forDelete */) } @@ -56,7 +56,7 @@ func MakeSplitter( func MakeSplitterForDelete( table catalog.TableDescriptor, index catalog.Index, - neededColOrdinals util.FastIntSet, + neededColOrdinals intsets.FastIntSet, forDelete bool, ) Splitter { // We can only split a span into separate family specific point lookups if: diff --git a/pkg/sql/span/span_splitter_test.go b/pkg/sql/span/span_splitter_test.go index a7005240f426..0a527f047795 100644 --- a/pkg/sql/span/span_splitter_test.go +++ b/pkg/sql/span/span_splitter_test.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/span" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" ) @@ -30,7 +30,7 @@ func TestSpanSplitterDoesNotSplitSystemTableFamilySpans(t *testing.T) { splitter := span.MakeSplitter( systemschema.DescriptorTable, systemschema.DescriptorTable.GetPrimaryIndex(), - util.MakeFastIntSet(0), + intsets.MakeFastIntSet(0), ) if res := splitter.CanSplitSpanIntoFamilySpans(1, false); res { @@ -49,7 +49,7 @@ func TestSpanSplitterCanSplitSpan(t *testing.T) { sql string index string prefixLen int - neededColumns util.FastIntSet + neededColumns intsets.FastIntSet containsNull bool canSplit bool }{ @@ -57,35 +57,35 @@ func TestSpanSplitterCanSplitSpan(t *testing.T) { sql: "a INT, b INT, c INT, d INT, PRIMARY KEY (a, b), FAMILY (a, b, c), FAMILY (d)", index: "t_pkey", prefixLen: 2, - neededColumns: util.MakeFastIntSet(0), + neededColumns: intsets.MakeFastIntSet(0), 
canSplit: true, }, { sql: "a INT, b INT, c INT, d INT, PRIMARY KEY (a, b), FAMILY (a, b, c), FAMILY (d)", index: "t_pkey", prefixLen: 1, - neededColumns: util.MakeFastIntSet(0), + neededColumns: intsets.MakeFastIntSet(0), canSplit: false, }, { sql: "a INT, b INT, c INT, d INT, PRIMARY KEY (a, b), FAMILY (a, b, c, d)", index: "t_pkey", prefixLen: 2, - neededColumns: util.MakeFastIntSet(0), + neededColumns: intsets.MakeFastIntSet(0), canSplit: true, }, { sql: "a INT, b INT, c INT, INDEX i (b) STORING (a, c), FAMILY (a), FAMILY (b), FAMILY (c)", index: "i", prefixLen: 1, - neededColumns: util.MakeFastIntSet(0), + neededColumns: intsets.MakeFastIntSet(0), canSplit: false, }, { sql: "a INT, b INT, c INT, UNIQUE INDEX i (b) STORING (a, c), FAMILY (a), FAMILY (b), FAMILY (c)", index: "i", prefixLen: 1, - neededColumns: util.MakeFastIntSet(0), + neededColumns: intsets.MakeFastIntSet(0), containsNull: true, canSplit: false, }, @@ -93,7 +93,7 @@ func TestSpanSplitterCanSplitSpan(t *testing.T) { sql: "a INT, b INT, c INT, UNIQUE INDEX i (b) STORING (a, c), FAMILY (a), FAMILY (b), FAMILY (c)", index: "i", prefixLen: 1, - neededColumns: util.MakeFastIntSet(0), + neededColumns: intsets.MakeFastIntSet(0), containsNull: false, canSplit: true, }, @@ -101,7 +101,7 @@ func TestSpanSplitterCanSplitSpan(t *testing.T) { sql: "a INT, b INT, c INT, UNIQUE INDEX i (b), FAMILY (a), FAMILY (b), FAMILY (c)", index: "i", prefixLen: 1, - neededColumns: util.MakeFastIntSet(0), + neededColumns: intsets.MakeFastIntSet(0), containsNull: false, canSplit: true, }, @@ -109,7 +109,7 @@ func TestSpanSplitterCanSplitSpan(t *testing.T) { sql: "a INT, b INT, c INT, UNIQUE INDEX i (b), FAMILY (a), FAMILY (b), FAMILY (c)", index: "i", prefixLen: 1, - neededColumns: util.MakeFastIntSet(0), + neededColumns: intsets.MakeFastIntSet(0), containsNull: true, canSplit: false, }, diff --git a/pkg/sql/sqlstats/insights/BUILD.bazel b/pkg/sql/sqlstats/insights/BUILD.bazel index 77b99b00a8a5..89033421da7e 100644 --- 
a/pkg/sql/sqlstats/insights/BUILD.bazel +++ b/pkg/sql/sqlstats/insights/BUILD.bazel @@ -25,8 +25,8 @@ go_library( "//pkg/settings/cluster", "//pkg/sql/clusterunique", "//pkg/sql/contention/contentionutils", - "//pkg/util", "//pkg/util/cache", + "//pkg/util/intsets", "//pkg/util/metric", "//pkg/util/quantile", "//pkg/util/stop", diff --git a/pkg/sql/sqlstats/insights/registry.go b/pkg/sql/sqlstats/insights/registry.go index 022bfe5054f0..11dfbf36ae11 100644 --- a/pkg/sql/sqlstats/insights/registry.go +++ b/pkg/sql/sqlstats/insights/registry.go @@ -15,7 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/clusterunique" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // This registry is the central object in the insights subsystem. It observes @@ -73,7 +73,7 @@ func (r *lockingRegistry) ObserveTransaction(sessionID clusterunique.ID, transac delete(r.statements, sessionID) defer statements.release() - var slowStatements util.FastIntSet + var slowStatements intsets.FastIntSet for i, s := range *statements { if r.detector.isSlow(s) { slowStatements.Add(i) diff --git a/pkg/sql/stats/BUILD.bazel b/pkg/sql/stats/BUILD.bazel index e379aa56f5d8..ab093ad4edd9 100644 --- a/pkg/sql/stats/BUILD.bazel +++ b/pkg/sql/stats/BUILD.bazel @@ -46,10 +46,10 @@ go_library( "//pkg/sql/sqlerrors", "//pkg/sql/sqlutil", "//pkg/sql/types", - "//pkg/util", "//pkg/util/cache", "//pkg/util/encoding", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/mon", "//pkg/util/protoutil", @@ -118,8 +118,8 @@ go_test( "//pkg/testutils/skip", "//pkg/testutils/sqlutils", "//pkg/testutils/testcluster", - "//pkg/util", "//pkg/util/encoding", + "//pkg/util/intsets", "//pkg/util/leaktest", "//pkg/util/log", "//pkg/util/mon", diff --git a/pkg/sql/stats/row_sampling.go b/pkg/sql/stats/row_sampling.go index 1220539d2f61..3286b9f4d6bd 100644 --- a/pkg/sql/stats/row_sampling.go +++ 
b/pkg/sql/stats/row_sampling.go @@ -20,7 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/errors" ) @@ -61,7 +61,7 @@ type SampleReservoir struct { // sampleCols contains the ordinals of columns that should be sampled from // each row. Note that the sampled rows still contain all columns, but // any columns not part of this set are given a null value. - sampleCols util.FastIntSet + sampleCols intsets.FastIntSet } var _ heap.Interface = &SampleReservoir{} @@ -71,7 +71,7 @@ func (sr *SampleReservoir) Init( numSamples, minNumSamples int, colTypes []*types.T, memAcc *mon.BoundAccount, - sampleCols util.FastIntSet, + sampleCols intsets.FastIntSet, ) { if minNumSamples < 1 || minNumSamples > numSamples { minNumSamples = numSamples diff --git a/pkg/sql/stats/row_sampling_test.go b/pkg/sql/stats/row_sampling_test.go index d80baed1e5a3..a3eaefb77aba 100644 --- a/pkg/sql/stats/row_sampling_test.go +++ b/pkg/sql/stats/row_sampling_test.go @@ -23,7 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/randutil" ) @@ -39,7 +39,7 @@ func runSampleTest( ) { ctx := context.Background() var sr SampleReservoir - sr.Init(numSamples, 1, []*types.T{types.Int}, memAcc, util.MakeFastIntSet(0)) + sr.Init(numSamples, 1, []*types.T{types.Int}, memAcc, intsets.MakeFastIntSet(0)) for _, r := range ranks { d := rowenc.DatumToEncDatum(types.Int, tree.NewDInt(tree.DInt(r))) prevCapacity := sr.Cap() diff 
--git a/pkg/sql/stats/util.go b/pkg/sql/stats/util.go index b6e2173e3200..796ac82a1887 100644 --- a/pkg/sql/stats/util.go +++ b/pkg/sql/stats/util.go @@ -12,13 +12,13 @@ package stats import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // MakeSortedColStatKey constructs a unique key representing cols that can be // used as the key in a map, and also sorts cols as a side-effect. func MakeSortedColStatKey(cols []descpb.ColumnID) string { - var colSet util.FastIntSet + var colSet intsets.FastIntSet for _, c := range cols { colSet.Add(int(c)) } diff --git a/pkg/sql/stmtdiagnostics/BUILD.bazel b/pkg/sql/stmtdiagnostics/BUILD.bazel index bfd5f018d883..77bc69161ff2 100644 --- a/pkg/sql/stmtdiagnostics/BUILD.bazel +++ b/pkg/sql/stmtdiagnostics/BUILD.bazel @@ -17,7 +17,7 @@ go_library( "//pkg/sql/sessiondata", "//pkg/sql/sqlutil", "//pkg/sql/types", - "//pkg/util", + "//pkg/util/intsets", "//pkg/util/log", "//pkg/util/stop", "//pkg/util/syncutil", diff --git a/pkg/sql/stmtdiagnostics/statement_diagnostics.go b/pkg/sql/stmtdiagnostics/statement_diagnostics.go index c0d047b42ce6..b2f5396f83bd 100644 --- a/pkg/sql/stmtdiagnostics/statement_diagnostics.go +++ b/pkg/sql/stmtdiagnostics/statement_diagnostics.go @@ -26,7 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/syncutil" @@ -700,7 +700,7 @@ func (r *Registry) pollRequests(ctx context.Context) error { defer r.mu.Unlock() now := timeutil.Now() - var ids util.FastIntSet + var ids intsets.FastIntSet for _, row := range rows { id := 
RequestID(*row[0].(*tree.DInt)) stmtFingerprint := string(*row[1].(*tree.DString)) diff --git a/pkg/sql/temporary_schema.go b/pkg/sql/temporary_schema.go index 69af5781b4d9..93822847b2f5 100644 --- a/pkg/sql/temporary_schema.go +++ b/pkg/sql/temporary_schema.go @@ -33,8 +33,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/retry" @@ -321,7 +321,7 @@ func cleanupTempSchemaObjects( if err != nil { return err } - dependentColIDs := util.MakeFastIntSet() + dependentColIDs := intsets.MakeFastIntSet() for _, colID := range d.ColumnIDs { dependentColIDs.Add(int(colID)) } diff --git a/pkg/sql/type_change.go b/pkg/sql/type_change.go index f1492a8ec4d8..b108c39cb2af 100644 --- a/pkg/sql/type_change.go +++ b/pkg/sql/type_change.go @@ -37,7 +37,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/iterutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/retry" @@ -1006,7 +1006,7 @@ func findUsagesOfEnumValueInPartitioning( return false, nil } - var colsToCheck util.FastIntSet + var colsToCheck intsets.FastIntSet for i, c := range columns[:partitioning.NumColumns()] { typT := c.GetType() if !typT.UserDefined() { @@ -1080,7 +1080,7 @@ func findUsageOfEnumValueInEncodedPartitioningValue( partitioning catalog.Partitioning, v []byte, fakePrefixDatums []tree.Datum, - 
colsToCheck util.FastIntSet, + colsToCheck intsets.FastIntSet, foundUsage bool, member *descpb.TypeDescriptor_EnumMember, ) (bool, error) { diff --git a/pkg/upgrade/upgrades/alter_jobs_add_job_type_test.go b/pkg/upgrade/upgrades/alter_jobs_add_job_type_test.go index 1f42bafd00fc..18b081fed0ea 100644 --- a/pkg/upgrade/upgrades/alter_jobs_add_job_type_test.go +++ b/pkg/upgrade/upgrades/alter_jobs_add_job_type_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/upgrade/upgrades" - "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/stretchr/testify/assert" @@ -171,7 +171,7 @@ func TestAlterSystemJobsTableAddJobTypeColumn(t *testing.T) { var typStr string rows, err := sqlDB.Query("SELECT distinct(job_type) FROM system.jobs") require.NoError(t, err) - seenTypes := util.FastIntSet{} + var seenTypes intsets.FastIntSet for rows.Next() { err = rows.Scan(&typStr) require.NoError(t, err) diff --git a/pkg/util/BUILD.bazel b/pkg/util/BUILD.bazel index f911216fba3f..915af7ce85ad 100644 --- a/pkg/util/BUILD.bazel +++ b/pkg/util/BUILD.bazel @@ -12,10 +12,6 @@ go_library( "every_n.go", "fast_int_map.go", "fast_int_set.go", # keep - "fast_int_set_large.go", - "fast_int_set_small.go", - "fast_int_set_str.go", - "fast_int_set_testonly.go", "hash.go", "nocopy.go", "pluralize.go", @@ -51,7 +47,6 @@ go_test( srcs = [ "every_n_test.go", "fast_int_map_test.go", - "fast_int_set_test.go", "slices_test.go", "smalltrace_test.go", "strings_test.go", diff --git a/pkg/util/intsets/BUILD.bazel b/pkg/util/intsets/BUILD.bazel new file mode 100644 index 000000000000..b0ed5e9d4445 --- /dev/null +++ b/pkg/util/intsets/BUILD.bazel @@ -0,0 +1,28 @@ +load("//build/bazelutil/unused_checker:unused.bzl", "get_x_data") 
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "intsets", + srcs = [ + "fast_int_set_large.go", + "fast_int_set_small.go", + "fast_int_set_str.go", + "fast_int_set_testonly.go", + ], + importpath = "github.com/cockroachdb/cockroach/pkg/util/intsets", + visibility = ["//visibility:public"], + deps = [ + "@com_github_cockroachdb_errors//:errors", + "@org_golang_x_tools//container/intsets", + ], +) + +go_test( + name = "intsets_test", + srcs = ["fast_int_set_test.go"], + args = ["-test.timeout=295s"], + embed = [":intsets"], + deps = ["//pkg/util/randutil"], +) + +get_x_data(name = "get_x_data") diff --git a/pkg/util/fast_int_set.go b/pkg/util/intsets/fast_int_set.go similarity index 98% rename from pkg/util/fast_int_set.go rename to pkg/util/intsets/fast_int_set.go index ac4ae0a40c62..da7dccdc28bf 100644 --- a/pkg/util/fast_int_set.go +++ b/pkg/util/intsets/fast_int_set.go @@ -11,7 +11,7 @@ //go:build !fast_int_set_small && !fast_int_set_large // +build !fast_int_set_small,!fast_int_set_large -package util +package intsets import ( "bytes" @@ -66,6 +66,13 @@ type FastIntSet struct { // Note: this can be set to a smaller value, e.g. for testing. const smallCutoff = 128 +const ( + // MaxInt is the maximum integer that a set can contain. + MaxInt = intsets.MaxInt + // MinInt is the minimum integer that a set can contain. + MinInt = intsets.MinInt +) + // bitmap implements a bitmap of size smallCutoff. 
type bitmap struct { // We don't use an array because that makes Go always keep the struct on the diff --git a/pkg/util/fast_int_set_large.go b/pkg/util/intsets/fast_int_set_large.go similarity index 96% rename from pkg/util/fast_int_set_large.go rename to pkg/util/intsets/fast_int_set_large.go index 6533afb7849e..456f4efce046 100644 --- a/pkg/util/fast_int_set_large.go +++ b/pkg/util/intsets/fast_int_set_large.go @@ -11,6 +11,6 @@ //go:build fast_int_set_large // +build fast_int_set_large -package util +package intsets var fastIntSetAlwaysSmall = false diff --git a/pkg/util/fast_int_set_small.go b/pkg/util/intsets/fast_int_set_small.go similarity index 96% rename from pkg/util/fast_int_set_small.go rename to pkg/util/intsets/fast_int_set_small.go index ca9e6b0b085a..8cc1e69857c8 100644 --- a/pkg/util/fast_int_set_small.go +++ b/pkg/util/intsets/fast_int_set_small.go @@ -11,6 +11,6 @@ //go:build fast_int_set_small // +build fast_int_set_small -package util +package intsets var fastIntSetAlwaysSmall = true diff --git a/pkg/util/fast_int_set_str.go b/pkg/util/intsets/fast_int_set_str.go similarity index 98% rename from pkg/util/fast_int_set_str.go rename to pkg/util/intsets/fast_int_set_str.go index 37c883092936..d47ca1bab35e 100644 --- a/pkg/util/fast_int_set_str.go +++ b/pkg/util/intsets/fast_int_set_str.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package util +package intsets import ( "bytes" diff --git a/pkg/util/fast_int_set_test.go b/pkg/util/intsets/fast_int_set_test.go similarity index 99% rename from pkg/util/fast_int_set_test.go rename to pkg/util/intsets/fast_int_set_test.go index 87b58c686e65..eb9743496056 100644 --- a/pkg/util/fast_int_set_test.go +++ b/pkg/util/intsets/fast_int_set_test.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. 
-package util +package intsets import ( "bytes" diff --git a/pkg/util/fast_int_set_testonly.go b/pkg/util/intsets/fast_int_set_testonly.go similarity index 99% rename from pkg/util/fast_int_set_testonly.go rename to pkg/util/intsets/fast_int_set_testonly.go index da522a754364..645d12259cb4 100644 --- a/pkg/util/fast_int_set_testonly.go +++ b/pkg/util/intsets/fast_int_set_testonly.go @@ -16,7 +16,7 @@ // fastIntSetAlwaysSmall). Tests that exhibit a difference when using one of // these variants indicates a bug. -package util +package intsets import ( "bytes" diff --git a/pkg/util/json/BUILD.bazel b/pkg/util/json/BUILD.bazel index eaa2ce2faf86..5d723de04667 100644 --- a/pkg/util/json/BUILD.bazel +++ b/pkg/util/json/BUILD.bazel @@ -28,6 +28,7 @@ go_library( "//pkg/sql/pgwire/pgerror", "//pkg/util", "//pkg/util/encoding", + "//pkg/util/intsets", "//pkg/util/json/tokenizer", "//pkg/util/randutil", "//pkg/util/syncutil", diff --git a/pkg/util/json/json.go b/pkg/util/json/json.go index d2933e7e9c11..150431e6f2c3 100644 --- a/pkg/util/json/json.go +++ b/pkg/util/json/json.go @@ -30,8 +30,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/inverted" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/cockroachdb/cockroach/pkg/util/intsets" uniq "github.com/cockroachdb/cockroach/pkg/util/unique" "github.com/cockroachdb/errors" ) @@ -404,7 +404,7 @@ func (b *ObjectBuilder) Build() JSON { type FixedKeysObjectBuilder struct { pairs []jsonKeyValuePair keyOrd map[string]int - updated util.FastIntSet + updated intsets.FastIntSet } // NewFixedKeysObjectBuilder creates JSON object builder for the specified @@ -448,7 +448,7 @@ func (b *FixedKeysObjectBuilder) Build() (JSON, error) { "expected all %d keys to be updated, %d updated", len(b.pairs), b.updated.Len()) } - b.updated = util.FastIntSet{} + b.updated 
= intsets.FastIntSet{} // Must copy b.pairs in case builder is reused. return jsonObject(append([]jsonKeyValuePair(nil), b.pairs...)), nil } From fad7ac8303582228b010375c60820d4a6a798570 Mon Sep 17 00:00:00 2001 From: Marcus Gartner Date: Fri, 14 Oct 2022 12:56:16 -0400 Subject: [PATCH 2/7] intsets: rename FastIntSet to Fast Release note: None --- .../schemafeed/table_event_filter.go | 8 +-- .../tenantcostserver/server_test.go | 4 +- .../roachtest/tests/multitenant_distsql.go | 4 +- pkg/col/coldata/batch.go | 2 +- .../kvcoord/dist_sender_mux_rangefeed.go | 2 +- .../kvserver/closedts/sidetransport/sender.go | 2 +- pkg/roachprod/install/nodes.go | 2 +- pkg/roachprod/vm/local/local.go | 2 +- pkg/sql/backfill/index_backfiller_cols.go | 4 +- pkg/sql/catalog/bootstrap/BUILD.bazel | 1 + pkg/sql/catalog/bootstrap/kv_writer.go | 3 +- pkg/sql/catalog/descpb/structured.go | 4 +- pkg/sql/catalog/descriptor_id_set.go | 2 +- pkg/sql/catalog/descs/hydrate.go | 2 +- .../internal/validate/schema_changer_state.go | 10 +-- .../catalog/post_deserialization_changes.go | 2 +- pkg/sql/catalog/table_col_set.go | 2 +- pkg/sql/catalog/table_col_set_test.go | 2 +- pkg/sql/catalog/table_elements.go | 2 +- pkg/sql/catalog/tabledesc/structured.go | 2 +- pkg/sql/catalog/tabledesc/validate.go | 4 +- pkg/sql/check.go | 2 +- pkg/sql/colencoding/key_encoding.go | 2 +- pkg/sql/colexec/colbuilder/execplan.go | 2 +- .../colexecspan/span_assembler_test.go | 2 +- pkg/sql/colfetcher/cfetcher.go | 6 +- pkg/sql/colfetcher/index_join.go | 2 +- pkg/sql/colmem/allocator.go | 6 +- pkg/sql/delegate/show_grants.go | 2 +- pkg/sql/delete_preserving_index_test.go | 2 +- pkg/sql/distinct.go | 4 +- pkg/sql/distsql_physical_planner.go | 10 +-- pkg/sql/drop_function.go | 2 +- pkg/sql/exec_factory_util.go | 2 +- pkg/sql/execinfra/processorsbase.go | 2 +- pkg/sql/execinfrapb/component_stats.go | 4 +- pkg/sql/importer/read_import_pgdump.go | 4 +- pkg/sql/insert_fast_path.go | 2 +- pkg/sql/instrumentation.go | 2 +- 
pkg/sql/opt/colset.go | 6 +- pkg/sql/opt/colset_test.go | 2 +- pkg/sql/opt/constraint/constraint_test.go | 2 +- pkg/sql/opt/doc.go | 2 +- pkg/sql/opt/exec/execbuilder/mutation.go | 4 +- pkg/sql/opt/exec/explain/plan_gist_factory.go | 4 +- pkg/sql/opt/exec/factory.go | 6 +- pkg/sql/opt/indexrec/candidate.go | 2 +- pkg/sql/opt/indexrec/hypothetical_index.go | 2 +- pkg/sql/opt/indexrec/hypothetical_table.go | 2 +- pkg/sql/opt/indexrec/rec.go | 37 +++++----- pkg/sql/opt/lookupjoin/constraint_builder.go | 2 +- pkg/sql/opt/memo/expr.go | 2 +- pkg/sql/opt/memo/extract.go | 2 +- pkg/sql/opt/memo/interner_test.go | 8 +-- pkg/sql/opt/metadata.go | 2 +- pkg/sql/opt/norm/factory.go | 4 +- pkg/sql/opt/norm/inline_funcs.go | 4 +- pkg/sql/opt/norm/project_funcs.go | 2 +- pkg/sql/opt/norm/prune_cols_funcs.go | 2 +- pkg/sql/opt/norm/reject_nulls_funcs.go | 6 +- pkg/sql/opt/optbuilder/arbiter_set.go | 10 +-- pkg/sql/opt/optbuilder/create_function.go | 4 +- pkg/sql/opt/optbuilder/create_view.go | 2 +- pkg/sql/opt/optbuilder/fk_cascade.go | 2 +- pkg/sql/opt/optbuilder/insert.go | 6 +- pkg/sql/opt/optbuilder/join.go | 6 +- pkg/sql/opt/optbuilder/mutation_builder.go | 12 ++-- .../optbuilder/mutation_builder_arbiter.go | 10 +-- .../opt/optbuilder/mutation_builder_unique.go | 6 +- pkg/sql/opt/partialidx/implicator.go | 2 +- pkg/sql/opt/partition/locality.go | 10 +-- pkg/sql/opt/partition/locality_test.go | 2 +- pkg/sql/opt/props/func_dep.go | 2 +- pkg/sql/opt/props/physical/distribution.go | 2 +- pkg/sql/opt/schema_dependencies.go | 4 +- pkg/sql/opt/testutils/opttester/opt_tester.go | 10 +-- pkg/sql/opt/testutils/testcat/create_table.go | 2 +- pkg/sql/opt/xform/coster.go | 12 ++-- pkg/sql/opt/xform/explorer.go | 2 +- pkg/sql/opt/xform/join_funcs.go | 8 +-- pkg/sql/opt/xform/join_order_builder.go | 2 +- pkg/sql/opt/xform/optimizer.go | 6 +- pkg/sql/opt/xform/scan_funcs.go | 6 +- pkg/sql/opt/xform/select_funcs.go | 2 +- pkg/sql/opt_exec_factory.go | 2 +- 
pkg/sql/physicalplan/physical_plan.go | 2 +- pkg/sql/row/fetcher.go | 2 +- pkg/sql/row/helper.go | 4 +- pkg/sql/row/partial_index.go | 4 +- pkg/sql/row/row_converter.go | 2 +- pkg/sql/rowenc/index_encoding.go | 8 +-- pkg/sql/rowexec/aggregator_test.go | 2 +- pkg/sql/rowexec/joinreader_test.go | 4 +- pkg/sql/rowexec/mergejoiner.go | 4 +- pkg/sql/rowexec/sample_aggregator.go | 4 +- pkg/sql/rowexec/sampler.go | 4 +- pkg/sql/schemachanger/rel/query.go | 2 +- pkg/sql/schemachanger/rel/query_data.go | 2 +- pkg/sql/schemachanger/rel/query_eval.go | 6 +- pkg/sql/schemachanger/rel/reltest/database.go | 4 +- .../scexec/backfiller/tracker.go | 4 +- pkg/sql/schemachanger/scexec/gc_jobs.go | 2 +- pkg/sql/sem/builtins/geo_builtins.go | 2 +- pkg/sql/sem/catid/index_id_set.go | 2 +- pkg/sql/sem/tree/constant.go | 2 +- pkg/sql/sem/tree/overload.go | 14 ++-- pkg/sql/sem/tree/type_check.go | 16 ++--- pkg/sql/span/span_splitter.go | 4 +- pkg/sql/span/span_splitter_test.go | 20 +++--- pkg/sql/sqlstats/insights/registry.go | 2 +- pkg/sql/stats/row_sampling.go | 4 +- pkg/sql/stats/row_sampling_test.go | 2 +- pkg/sql/stats/util.go | 2 +- .../stmtdiagnostics/statement_diagnostics.go | 2 +- pkg/sql/temporary_schema.go | 2 +- pkg/sql/type_change.go | 4 +- .../upgrades/alter_jobs_add_job_type_test.go | 2 +- pkg/util/intsets/BUILD.bazel | 10 +-- pkg/util/intsets/{fast_int_set.go => fast.go} | 68 +++++++++--------- .../{fast_int_set_large.go => fast_large.go} | 0 .../{fast_int_set_small.go => fast_small.go} | 0 .../{fast_int_set_str.go => fast_str.go} | 2 +- .../{fast_int_set_test.go => fast_test.go} | 32 ++++----- ...t_int_set_testonly.go => fast_testonly.go} | 70 +++++++++---------- pkg/util/json/json.go | 4 +- 125 files changed, 340 insertions(+), 345 deletions(-) rename pkg/util/intsets/{fast_int_set.go => fast.go} (88%) rename pkg/util/intsets/{fast_int_set_large.go => fast_large.go} (100%) rename pkg/util/intsets/{fast_int_set_small.go => fast_small.go} (100%) rename 
pkg/util/intsets/{fast_int_set_str.go => fast_str.go} (97%) rename pkg/util/intsets/{fast_int_set_test.go => fast_test.go} (93%) rename pkg/util/intsets/{fast_int_set_testonly.go => fast_testonly.go} (79%) diff --git a/pkg/ccl/changefeedccl/schemafeed/table_event_filter.go b/pkg/ccl/changefeedccl/schemafeed/table_event_filter.go index 72ec32ad2216..7a8c1cabe47f 100644 --- a/pkg/ccl/changefeedccl/schemafeed/table_event_filter.go +++ b/pkg/ccl/changefeedccl/schemafeed/table_event_filter.go @@ -199,7 +199,7 @@ func droppedColumnIsWatched(e TableEvent, targets changefeedbase.Targets) (bool, return true, nil } - var watchedColumnIDs intsets.FastIntSet + var watchedColumnIDs intsets.Fast if err := e.Before.ForeachFamily(func(family *descpb.ColumnFamilyDescriptor) error { if _, ok := specifiedColumnFamiliesForTable[family.Name]; ok { for _, columnID := range family.ColumnIDs { @@ -235,11 +235,11 @@ func addedColumnIsWatched(e TableEvent, targets changefeedbase.Targets) (bool, e return false, nil } - var beforeCols intsets.FastIntSet + var beforeCols intsets.Fast for _, col := range e.Before.VisibleColumns() { beforeCols.Add(int(col.GetID())) } - var addedCols intsets.FastIntSet + var addedCols intsets.Fast for _, col := range e.After.VisibleColumns() { colID := int(col.GetID()) if !beforeCols.Contains(colID) { @@ -358,7 +358,7 @@ func hasNewPrimaryIndexWithNoVisibleColumnChanges( ) (cols catalog.TableColSet) { // Generate a set of watched columns if the targets contains specific columns. 
- var targetedCols intsets.FastIntSet + var targetedCols intsets.Fast if hasSpecificColumnTargets { err := tab.ForeachFamily(func(fam *descpb.ColumnFamilyDescriptor) error { if _, ok := targetFamilies[fam.Name]; ok { diff --git a/pkg/ccl/multitenantccl/tenantcostserver/server_test.go b/pkg/ccl/multitenantccl/tenantcostserver/server_test.go index f539ca70d541..b11708d3857b 100644 --- a/pkg/ccl/multitenantccl/tenantcostserver/server_test.go +++ b/pkg/ccl/multitenantccl/tenantcostserver/server_test.go @@ -327,7 +327,7 @@ func TestInstanceCleanup(t *testing.T) { // Note: this number needs to be at most maxInstancesCleanup. const maxInstances = 10 - var liveset, prev intsets.FastIntSet + var liveset, prev intsets.Fast for steps := 0; steps < 100; steps++ { // Keep the previous set for debugging. @@ -370,7 +370,7 @@ func TestInstanceCleanup(t *testing.T) { rows := ts.r.Query(t, "SELECT instance_id FROM system.tenant_usage WHERE tenant_id = 5 AND instance_id > 0", ) - var serverSet intsets.FastIntSet + var serverSet intsets.Fast for rows.Next() { var id int if err := rows.Scan(&id); err != nil { diff --git a/pkg/cmd/roachtest/tests/multitenant_distsql.go b/pkg/cmd/roachtest/tests/multitenant_distsql.go index 3b60b3f0384b..b82631107c99 100644 --- a/pkg/cmd/roachtest/tests/multitenant_distsql.go +++ b/pkg/cmd/roachtest/tests/multitenant_distsql.go @@ -95,7 +95,7 @@ func runMultiTenantDistSQL( require.NoError(t, err) // Create numInstances sql pods and spread them evenly across the machines. 
- var nodes intsets.FastIntSet + var nodes intsets.Fast nodes.Add(1) for i := 1; i < numInstances; i++ { node := ((i + 1) % c.Spec().NodeCount) + 1 @@ -156,7 +156,7 @@ func runMultiTenantDistSQL( continue } - var nodesInPlan intsets.FastIntSet + var nodesInPlan intsets.Fast for res.Next() { str := "" err = res.Scan(&str) diff --git a/pkg/col/coldata/batch.go b/pkg/col/coldata/batch.go index c146288c5e7f..172d417deba7 100644 --- a/pkg/col/coldata/batch.go +++ b/pkg/col/coldata/batch.go @@ -210,7 +210,7 @@ type MemBatch struct { // b is the slice of columns in this batch. b []Vec // datumVecIdxs stores the indices of all datum-backed vectors in b. - datumVecIdxs intsets.FastIntSet + datumVecIdxs intsets.Fast useSel bool // sel is - if useSel is true - a selection vector from upstream. A // selection vector is a list of selected tuple indices in this memBatch's diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_mux_rangefeed.go b/pkg/kv/kvclient/kvcoord/dist_sender_mux_rangefeed.go index dd213b5da02f..4f72216ec51a 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_mux_rangefeed.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_mux_rangefeed.go @@ -55,7 +55,7 @@ type rangefeedMuxer struct { type muxClientState struct { client roachpb.Internal_MuxRangeFeedClient - streams intsets.FastIntSet + streams intsets.Fast cancel context.CancelFunc } diff --git a/pkg/kv/kvserver/closedts/sidetransport/sender.go b/pkg/kv/kvserver/closedts/sidetransport/sender.go index b1c742d28c1e..3644bb5f7b8e 100644 --- a/pkg/kv/kvserver/closedts/sidetransport/sender.go +++ b/pkg/kv/kvserver/closedts/sidetransport/sender.go @@ -356,7 +356,7 @@ func (s *Sender) publish(ctx context.Context) hlc.ClockTimestamp { // We'll accumulate all the nodes we need to connect to in order to check if // we need to open new connections or close existing ones. 
- nodesWithFollowers := intsets.MakeFastIntSet() + nodesWithFollowers := intsets.MakeFast() // If there's any tracked ranges for which we're not the leaseholder any more, // we need to untrack them and tell the connections about it. diff --git a/pkg/roachprod/install/nodes.go b/pkg/roachprod/install/nodes.go index 7ea3fc061fdc..9642a2e09e26 100644 --- a/pkg/roachprod/install/nodes.go +++ b/pkg/roachprod/install/nodes.go @@ -48,7 +48,7 @@ func ListNodes(s string, numNodesInCluster int) (Nodes, error) { return allNodes(numNodesInCluster), nil } - var set intsets.FastIntSet + var set intsets.Fast for _, p := range strings.Split(s, ",") { parts := strings.Split(p, "-") switch len(parts) { diff --git a/pkg/roachprod/vm/local/local.go b/pkg/roachprod/vm/local/local.go index 60046bbc6b4e..e047fe665ca2 100644 --- a/pkg/roachprod/vm/local/local.go +++ b/pkg/roachprod/vm/local/local.go @@ -158,7 +158,7 @@ func (p *Provider) Create( // We will need to assign ports to the nodes, and they must not conflict with // any other local clusters. - var portsTaken intsets.FastIntSet + var portsTaken intsets.Fast for _, c := range p.clusters { for i := range c.VMs { portsTaken.Add(c.VMs[i].SQLPort) diff --git a/pkg/sql/backfill/index_backfiller_cols.go b/pkg/sql/backfill/index_backfiller_cols.go index 77b20e707cbd..7013b2fc8901 100644 --- a/pkg/sql/backfill/index_backfiller_cols.go +++ b/pkg/sql/backfill/index_backfiller_cols.go @@ -45,7 +45,7 @@ type indexBackfillerCols struct { // valNeededForCol contains the indexes (into cols) of all columns that we // need to fetch values for. - valNeededForCol intsets.FastIntSet + valNeededForCol intsets.Fast } // makeIndexBackfillColumns computes the set of writable columns and @@ -141,7 +141,7 @@ func makeIndexBackfillColumns( // because of references in expressions. 
func makeInitialValNeededForCol( ib indexBackfillerCols, addedIndexes []catalog.Index, -) (valNeededForCol intsets.FastIntSet) { +) (valNeededForCol intsets.Fast) { // Any columns we're going to eval, we don't need values for ahead of time. toEval := func() catalog.TableColSet { columnIDs := func(columns []catalog.Column) (s catalog.TableColSet) { diff --git a/pkg/sql/catalog/bootstrap/BUILD.bazel b/pkg/sql/catalog/bootstrap/BUILD.bazel index c37f5bd77311..fd9646284f93 100644 --- a/pkg/sql/catalog/bootstrap/BUILD.bazel +++ b/pkg/sql/catalog/bootstrap/BUILD.bazel @@ -23,6 +23,7 @@ go_library( "//pkg/sql/sem/catconstants", "//pkg/sql/sem/catid", "//pkg/sql/sem/tree", + "//pkg/util/intsets", "//pkg/util/iterutil", "//pkg/util/log", "//pkg/util/protoutil", diff --git a/pkg/sql/catalog/bootstrap/kv_writer.go b/pkg/sql/catalog/bootstrap/kv_writer.go index ad8e893ab045..f4525236df7a 100644 --- a/pkg/sql/catalog/bootstrap/kv_writer.go +++ b/pkg/sql/catalog/bootstrap/kv_writer.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) @@ -34,7 +35,7 @@ type KVWriter struct { codec keys.SQLCodec tableDesc catalog.TableDescriptor colIDtoRowIndex catalog.TableColMap - skippedFamilyIDs intsets.FastIntSet + skippedFamilyIDs intsets.Fast } // RecordToKeyValues transforms a table record into the corresponding key-value diff --git a/pkg/sql/catalog/descpb/structured.go b/pkg/sql/catalog/descpb/structured.go index f61db366c664..e534393f8e2d 100644 --- a/pkg/sql/catalog/descpb/structured.go +++ b/pkg/sql/catalog/descpb/structured.go @@ -158,12 +158,12 @@ func (c ColumnIDs) Equals(input ColumnIDs) bool { // PermutationOf returns true if this list and the input list contain the same // set of column IDs in any order. 
Duplicate ColumnIDs have no effect. func (c ColumnIDs) PermutationOf(input ColumnIDs) bool { - ourColsSet := intsets.MakeFastIntSet() + ourColsSet := intsets.MakeFast() for _, col := range c { ourColsSet.Add(int(col)) } - inputColsSet := intsets.MakeFastIntSet() + inputColsSet := intsets.MakeFast() for _, inputCol := range input { inputColsSet.Add(int(inputCol)) } diff --git a/pkg/sql/catalog/descriptor_id_set.go b/pkg/sql/catalog/descriptor_id_set.go index 420a6bac7b05..3a6c5471c1e5 100644 --- a/pkg/sql/catalog/descriptor_id_set.go +++ b/pkg/sql/catalog/descriptor_id_set.go @@ -17,7 +17,7 @@ import ( // DescriptorIDSet efficiently stores an unordered set of descriptor ids. type DescriptorIDSet struct { - set intsets.FastIntSet + set intsets.Fast } // MakeDescriptorIDSet returns a set initialized with the given values. diff --git a/pkg/sql/catalog/descs/hydrate.go b/pkg/sql/catalog/descs/hydrate.go index 8151e72754f5..18818affa7ab 100644 --- a/pkg/sql/catalog/descs/hydrate.go +++ b/pkg/sql/catalog/descs/hydrate.go @@ -53,7 +53,7 @@ func (tc *Collection) hydrateDescriptors( ctx context.Context, txn *kv.Txn, flags tree.CommonLookupFlags, descs []catalog.Descriptor, ) error { - var hydratableMutableIndexes, hydratableImmutableIndexes intsets.FastIntSet + var hydratableMutableIndexes, hydratableImmutableIndexes intsets.Fast for i, desc := range descs { if desc == nil || !hydrateddesc.IsHydratable(desc) { continue diff --git a/pkg/sql/catalog/internal/validate/schema_changer_state.go b/pkg/sql/catalog/internal/validate/schema_changer_state.go index fb47e50dda06..a5dccd0c7e3a 100644 --- a/pkg/sql/catalog/internal/validate/schema_changer_state.go +++ b/pkg/sql/catalog/internal/validate/schema_changer_state.go @@ -79,7 +79,7 @@ func validateSchemaChangerState(d catalog.Descriptor, vea catalog.ValidationErro // Validate that the target ranks are unique. 
ranksToTarget := map[uint32]*scpb.Target{} { - var duplicates intsets.FastIntSet + var duplicates intsets.Fast for i, r := range scs.TargetRanks { if _, exists := ranksToTarget[r]; exists { duplicates.Add(int(r)) @@ -102,17 +102,17 @@ func validateSchemaChangerState(d catalog.Descriptor, vea catalog.ValidationErro // Validate that the statements refer exclusively to targets in this // descriptor. - statementsExpected := map[uint32]*intsets.FastIntSet{} + statementsExpected := map[uint32]*intsets.Fast{} for i := range scs.Targets { t := &scs.Targets[i] exp, ok := statementsExpected[t.Metadata.StatementID] if !ok { - exp = &intsets.FastIntSet{} + exp = &intsets.Fast{} statementsExpected[t.Metadata.StatementID] = exp } exp.Add(int(scs.TargetRanks[i])) } - var statementRanks intsets.FastIntSet + var statementRanks intsets.Fast for _, s := range scs.RelevantStatements { statementRanks.Add(int(s.StatementRank)) if _, ok := statementsExpected[s.StatementRank]; !ok { @@ -128,7 +128,7 @@ func validateSchemaChangerState(d catalog.Descriptor, vea catalog.ValidationErro // Validate that all targets have a corresponding statement. { - var expected intsets.FastIntSet + var expected intsets.Fast stmts := statementRanks.Copy() for rank := range statementsExpected { expected.Add(int(rank)) diff --git a/pkg/sql/catalog/post_deserialization_changes.go b/pkg/sql/catalog/post_deserialization_changes.go index 3f039b127ef9..f726e75679ff 100644 --- a/pkg/sql/catalog/post_deserialization_changes.go +++ b/pkg/sql/catalog/post_deserialization_changes.go @@ -19,7 +19,7 @@ type PostDeserializationChangeType int // PostDeserializationChanges are a set of booleans to indicate which types of // upgrades or fixes occurred when filling in the descriptor after // deserialization. -type PostDeserializationChanges struct{ s intsets.FastIntSet } +type PostDeserializationChanges struct{ s intsets.Fast } // HasChanges returns true if the set of changes is non-empty. 
func (c PostDeserializationChanges) HasChanges() bool { diff --git a/pkg/sql/catalog/table_col_set.go b/pkg/sql/catalog/table_col_set.go index 8676ded4d331..827334c4e524 100644 --- a/pkg/sql/catalog/table_col_set.go +++ b/pkg/sql/catalog/table_col_set.go @@ -17,7 +17,7 @@ import ( // TableColSet efficiently stores an unordered set of column ids. type TableColSet struct { - set intsets.FastIntSet + set intsets.Fast } // MakeTableColSet returns a set initialized with the given values. diff --git a/pkg/sql/catalog/table_col_set_test.go b/pkg/sql/catalog/table_col_set_test.go index d2bf59691f40..779017ebd39c 100644 --- a/pkg/sql/catalog/table_col_set_test.go +++ b/pkg/sql/catalog/table_col_set_test.go @@ -23,7 +23,7 @@ func BenchmarkTableColSet(b *testing.B) { const n = 50 b.Run("fastintset", func(b *testing.B) { for i := 0; i < b.N; i++ { - var c intsets.FastIntSet + var c intsets.Fast for j := 0; j < n; j++ { c.Add(j) } diff --git a/pkg/sql/catalog/table_elements.go b/pkg/sql/catalog/table_elements.go index a1cb28583cad..068711d916b6 100644 --- a/pkg/sql/catalog/table_elements.go +++ b/pkg/sql/catalog/table_elements.go @@ -930,7 +930,7 @@ func UserDefinedTypeColsInFamilyHaveSameVersion( return false, err } - familyCols := intsets.FastIntSet{} + familyCols := intsets.Fast{} for _, colID := range family.ColumnIDs { familyCols.Add(int(colID)) } diff --git a/pkg/sql/catalog/tabledesc/structured.go b/pkg/sql/catalog/tabledesc/structured.go index 621f40426202..69c1e67d3b8d 100644 --- a/pkg/sql/catalog/tabledesc/structured.go +++ b/pkg/sql/catalog/tabledesc/structured.go @@ -2042,7 +2042,7 @@ func (desc *wrapper) MakeFirstMutationPublic( } type mutationPublicationPolicy struct { - policy intsets.FastIntSet + policy intsets.Fast } func makeMutationPublicationPolicy( diff --git a/pkg/sql/catalog/tabledesc/validate.go b/pkg/sql/catalog/tabledesc/validate.go index 5f2bc9f874cd..bad8542898e1 100644 --- a/pkg/sql/catalog/tabledesc/validate.go +++ 
b/pkg/sql/catalog/tabledesc/validate.go @@ -658,7 +658,7 @@ func (desc *wrapper) ValidateSelf(vea catalog.ValidationErrorAccumulator) { // Validate mutations and exit early if any of these are deeply corrupted. { - var mutationIDs intsets.FastIntSet + var mutationIDs intsets.Fast mutationsHaveErrs := false for _, m := range desc.Mutations { mutationIDs.Add(int(m.MutationID)) @@ -1161,7 +1161,7 @@ func (desc *wrapper) validateUniqueWithoutIndexConstraints( } // Verify that the constraint's column IDs are valid and unique. - var seen intsets.FastIntSet + var seen intsets.Fast for i, n := 0, c.NumKeyColumns(); i < n; i++ { colID := c.GetKeyColumnID(i) _, ok := columnsByID[colID] diff --git a/pkg/sql/check.go b/pkg/sql/check.go index c3bb6dbfedfe..09625ff93069 100644 --- a/pkg/sql/check.go +++ b/pkg/sql/check.go @@ -819,7 +819,7 @@ func formatValues(colNames []string, values tree.Datums) string { // It is allowed to check only a subset of the active checks (the optimizer // could in principle determine that some checks can't fail because they // statically evaluate to true for the entire input). -type checkSet = intsets.FastIntSet +type checkSet = intsets.Fast // When executing mutations, we calculate a boolean column for each check // indicating if the check passed. 
This function verifies that each result is diff --git a/pkg/sql/colencoding/key_encoding.go b/pkg/sql/colencoding/key_encoding.go index 212b3437fb33..7885e43a5353 100644 --- a/pkg/sql/colencoding/key_encoding.go +++ b/pkg/sql/colencoding/key_encoding.go @@ -52,7 +52,7 @@ func DecodeKeyValsToCols( indexColIdx []int, checkAllColsForNull bool, keyCols []descpb.IndexFetchSpec_KeyColumn, - unseen *intsets.FastIntSet, + unseen *intsets.Fast, key []byte, scratch []byte, ) (remainingKey []byte, foundNull bool, retScratch []byte, _ error) { diff --git a/pkg/sql/colexec/colbuilder/execplan.go b/pkg/sql/colexec/colbuilder/execplan.go index 1791dc33e2ae..023d197ec7ab 100644 --- a/pkg/sql/colexec/colbuilder/execplan.go +++ b/pkg/sql/colexec/colbuilder/execplan.go @@ -130,7 +130,7 @@ type opResult struct { } func needHashAggregator(aggSpec *execinfrapb.AggregatorSpec) (bool, error) { - var groupCols, orderedCols intsets.FastIntSet + var groupCols, orderedCols intsets.Fast for _, col := range aggSpec.OrderedGroupCols { orderedCols.Add(int(col)) } diff --git a/pkg/sql/colexec/colexecspan/span_assembler_test.go b/pkg/sql/colexec/colexecspan/span_assembler_test.go index ce266b182da2..449a10011b4b 100644 --- a/pkg/sql/colexec/colexecspan/span_assembler_test.go +++ b/pkg/sql/colexec/colexecspan/span_assembler_test.go @@ -91,7 +91,7 @@ func TestSpanAssembler(t *testing.T) { if err != nil { t.Fatal(err) } - neededColumns := intsets.MakeFastIntSet(1, 2, 3, 4) + neededColumns := intsets.MakeFast(1, 2, 3, 4) cols := make([]coldata.Vec, len(typs)) for i, typ := range typs { diff --git a/pkg/sql/colfetcher/cfetcher.go b/pkg/sql/colfetcher/cfetcher.go index d2337a9c73a8..d54d600f56a1 100644 --- a/pkg/sql/colfetcher/cfetcher.go +++ b/pkg/sql/colfetcher/cfetcher.go @@ -52,7 +52,7 @@ type cTableInfo struct { // The set of required value-component column ordinals among only needed // columns. 
- neededValueColsByIdx intsets.FastIntSet + neededValueColsByIdx intsets.Fast // Map used to get the column index based on the descpb.ColumnID. // It's kept as a pointer so we don't have to re-allocate to sort it each @@ -66,7 +66,7 @@ type cTableInfo struct { // The set of column ordinals which are both composite and part of the index // key. - compositeIndexColOrdinals intsets.FastIntSet + compositeIndexColOrdinals intsets.Fast // One number per column coming from the "key suffix" that is part of the // value; each number is a column ordinal among only needed columns; -1 if @@ -246,7 +246,7 @@ type cFetcher struct { // remainingValueColsByIdx is the set of value columns that are yet to be // seen during the decoding of the current row. - remainingValueColsByIdx intsets.FastIntSet + remainingValueColsByIdx intsets.Fast // lastRowPrefix is the row prefix for the last row we saw a key for. New // keys are compared against this prefix to determine whether they're part // of a new row or not. diff --git a/pkg/sql/colfetcher/index_join.go b/pkg/sql/colfetcher/index_join.go index 73e66a911075..a3e187a65dae 100644 --- a/pkg/sql/colfetcher/index_join.go +++ b/pkg/sql/colfetcher/index_join.go @@ -95,7 +95,7 @@ type ColIndexJoin struct { // Fields that deal with variable-size types. hasVarSizeCols bool - varSizeVecIdxs intsets.FastIntSet + varSizeVecIdxs intsets.Fast byteLikeCols []*coldata.Bytes decimalCols []coldata.Decimals datumCols []coldata.DatumVec diff --git a/pkg/sql/colmem/allocator.go b/pkg/sql/colmem/allocator.go index 1ddb3d8c5b2b..9f10019eb680 100644 --- a/pkg/sql/colmem/allocator.go +++ b/pkg/sql/colmem/allocator.go @@ -821,7 +821,7 @@ type SetAccountingHelper struct { allFixedLength bool // bytesLikeVecIdxs stores the indices of all bytes-like vectors. - bytesLikeVecIdxs intsets.FastIntSet + bytesLikeVecIdxs intsets.Fast // bytesLikeVectors stores all actual bytes-like vectors. It is updated // every time a new batch is allocated. 
bytesLikeVectors []*coldata.Bytes @@ -830,7 +830,7 @@ type SetAccountingHelper struct { prevBytesLikeTotalSize int64 // decimalVecIdxs stores the indices of all decimal vectors. - decimalVecIdxs intsets.FastIntSet + decimalVecIdxs intsets.Fast // decimalVecs stores all decimal vectors. They are updated every time a new // batch is allocated. decimalVecs []coldata.Decimals @@ -849,7 +849,7 @@ type SetAccountingHelper struct { // varLenDatumVecIdxs stores the indices of all datum-backed vectors with // variable-length values. - varLenDatumVecIdxs intsets.FastIntSet + varLenDatumVecIdxs intsets.Fast // varLenDatumVecs stores all variable-sized datum-backed vectors. They are // updated every time a new batch is allocated. varLenDatumVecs []coldata.DatumVec diff --git a/pkg/sql/delegate/show_grants.go b/pkg/sql/delegate/show_grants.go index 8ed433e003da..61221f036f5d 100644 --- a/pkg/sql/delegate/show_grants.go +++ b/pkg/sql/delegate/show_grants.go @@ -251,7 +251,7 @@ SELECT database_name, } else if n.Targets != nil && len(n.Targets.Functions) > 0 { fmt.Fprint(&source, udfQuery) orderBy = "1,2,3,4,5,6" - fnResolved := intsets.MakeFastIntSet() + fnResolved := intsets.MakeFast() for _, fn := range n.Targets.Functions { un := fn.FuncName.ToUnresolvedObjectName().ToUnresolvedName() fd, err := d.catalog.ResolveFunction(d.ctx, un, &d.evalCtx.SessionData().SearchPath) diff --git a/pkg/sql/delete_preserving_index_test.go b/pkg/sql/delete_preserving_index_test.go index 6727a130395c..849a7faf64b8 100644 --- a/pkg/sql/delete_preserving_index_test.go +++ b/pkg/sql/delete_preserving_index_test.go @@ -740,7 +740,7 @@ func fetchIndex( idx, err := table.FindIndexWithName(indexName) require.NoError(t, err) colIdxMap := catalog.ColumnIDToOrdinalMap(table.PublicColumns()) - var valsNeeded intsets.FastIntSet + var valsNeeded intsets.Fast { colIDsNeeded := idx.CollectKeyColumnIDs() if idx.Primary() { diff --git a/pkg/sql/distinct.go b/pkg/sql/distinct.go index b2695fd45084..8700d5bfa8d0 
100644 --- a/pkg/sql/distinct.go +++ b/pkg/sql/distinct.go @@ -28,11 +28,11 @@ type distinctNode struct { // Otherwise, distinctOnColIdxs is a strict subset of the child // planNode's column indices indicating which columns are specified in // the DISTINCT ON () clause. - distinctOnColIdxs intsets.FastIntSet + distinctOnColIdxs intsets.Fast // Subset of distinctOnColIdxs on which the input guarantees an ordering. // All rows that are equal on these columns appear contiguously in the input. - columnsInOrder intsets.FastIntSet + columnsInOrder intsets.Fast reqOrdering ReqOrdering diff --git a/pkg/sql/distsql_physical_planner.go b/pkg/sql/distsql_physical_planner.go index ce3c57c9b0d8..8e8bbe93d0de 100644 --- a/pkg/sql/distsql_physical_planner.go +++ b/pkg/sql/distsql_physical_planner.go @@ -1937,7 +1937,7 @@ func (dsp *DistSQLPlanner) planAggregators( groupCols[i] = uint32(p.PlanToStreamColMap[idx]) } orderedGroupCols := make([]uint32, len(info.groupColOrdering)) - var orderedGroupColSet intsets.FastIntSet + var orderedGroupColSet intsets.Fast for i, c := range info.groupColOrdering { orderedGroupCols[i] = uint32(p.PlanToStreamColMap[c.ColIdx]) orderedGroupColSet.Add(c.ColIdx) @@ -2002,7 +2002,7 @@ func (dsp *DistSQLPlanner) planAggregators( // left and right inputs of the join, respectively, then columns // 0, 1, ..., m-1 refer to the corresponding "left" columns whereas // m, m+1, ..., m+n-1 refer to the "right" ones. 
- var joinEqCols intsets.FastIntSet + var joinEqCols intsets.Fast m := len(prevStageProc.Input[0].ColumnTypes) for _, leftEqCol := range hjSpec.LeftEqColumns { joinEqCols.Add(int(leftEqCol)) @@ -2033,7 +2033,7 @@ func (dsp *DistSQLPlanner) planAggregators( } } if allDistinct { - var distinctColumnsSet intsets.FastIntSet + var distinctColumnsSet intsets.Fast for _, e := range info.aggregations { for _, colIdx := range e.ColIdx { distinctColumnsSet.Add(int(colIdx)) @@ -2588,7 +2588,7 @@ func (dsp *DistSQLPlanner) createPlanForIndexJoin( } fetchColIDs := make([]descpb.ColumnID, len(n.cols)) - var fetchOrdinals intsets.FastIntSet + var fetchOrdinals intsets.Fast for i := range n.cols { fetchColIDs[i] = n.cols[i].GetID() fetchOrdinals.Add(n.cols[i].Ordinal()) @@ -2671,7 +2671,7 @@ func (dsp *DistSQLPlanner) createPlanForLookupJoin( } fetchColIDs := make([]descpb.ColumnID, len(n.table.cols)) - var fetchOrdinals intsets.FastIntSet + var fetchOrdinals intsets.Fast for i := range n.table.cols { fetchColIDs[i] = n.table.cols[i].GetID() fetchOrdinals.Add(n.table.cols[i].Ordinal()) diff --git a/pkg/sql/drop_function.go b/pkg/sql/drop_function.go index 71e971b0533b..be6836bcaa61 100644 --- a/pkg/sql/drop_function.go +++ b/pkg/sql/drop_function.go @@ -51,7 +51,7 @@ func (p *planner) DropFunction( toDrop: make([]*funcdesc.Mutable, 0, len(n.Functions)), dropBehavior: n.DropBehavior, } - fnResolved := intsets.MakeFastIntSet() + fnResolved := intsets.MakeFast() for _, fn := range n.Functions { ol, err := p.matchUDF(ctx, &fn, !n.IfExists) if err != nil { diff --git a/pkg/sql/exec_factory_util.go b/pkg/sql/exec_factory_util.go index 7ac23b4f67a0..84b08a42d4d0 100644 --- a/pkg/sql/exec_factory_util.go +++ b/pkg/sql/exec_factory_util.go @@ -309,7 +309,7 @@ func constructOpaque(metadata opt.OpaqueMetadata) (planNode, error) { return o.plan, nil } -func convertFastIntSetToUint32Slice(colIdxs intsets.FastIntSet) []uint32 { +func convertFastIntSetToUint32Slice(colIdxs intsets.Fast) []uint32 
{ cols := make([]uint32, 0, colIdxs.Len()) for i, ok := colIdxs.Next(0); ok; i, ok = colIdxs.Next(i + 1) { cols = append(cols, uint32(i)) diff --git a/pkg/sql/execinfra/processorsbase.go b/pkg/sql/execinfra/processorsbase.go index f2bb7d95bea3..182cb04d4069 100644 --- a/pkg/sql/execinfra/processorsbase.go +++ b/pkg/sql/execinfra/processorsbase.go @@ -982,7 +982,7 @@ type LocalProcessor interface { // HasParallelProcessors returns whether flow contains multiple processors in // the same stage. func HasParallelProcessors(flow *execinfrapb.FlowSpec) bool { - var seen intsets.FastIntSet + var seen intsets.Fast for _, p := range flow.Processors { if seen.Contains(int(p.StageID)) { return true diff --git a/pkg/sql/execinfrapb/component_stats.go b/pkg/sql/execinfrapb/component_stats.go index 7e25c7f88c20..7cdba6f51796 100644 --- a/pkg/sql/execinfrapb/component_stats.go +++ b/pkg/sql/execinfrapb/component_stats.go @@ -417,8 +417,8 @@ func ExtractStatsFromSpans( // ExtractNodesFromSpans extracts a list of node ids from a set of tracing // spans. -func ExtractNodesFromSpans(spans []tracingpb.RecordedSpan) intsets.FastIntSet { - var nodes intsets.FastIntSet +func ExtractNodesFromSpans(spans []tracingpb.RecordedSpan) intsets.Fast { + var nodes intsets.Fast // componentStats is only used to check whether a structured payload item is // of ComponentStats type. 
var componentStats ComponentStats diff --git a/pkg/sql/importer/read_import_pgdump.go b/pkg/sql/importer/read_import_pgdump.go index f0c69eba163a..a8828328c3b8 100644 --- a/pkg/sql/importer/read_import_pgdump.go +++ b/pkg/sql/importer/read_import_pgdump.go @@ -1127,7 +1127,7 @@ func (m *pgDumpReader) readFile( var targetColMapIdx []int if len(i.Columns) != 0 { targetColMapIdx = make([]int, len(i.Columns)) - conv.TargetColOrds = intsets.FastIntSet{} + conv.TargetColOrds = intsets.Fast{} for j := range i.Columns { colName := string(i.Columns[j]) idx, ok := m.colMap[conv][colName] @@ -1194,7 +1194,7 @@ func (m *pgDumpReader) readFile( var targetColMapIdx []int if conv != nil { targetColMapIdx = make([]int, len(i.Columns)) - conv.TargetColOrds = intsets.FastIntSet{} + conv.TargetColOrds = intsets.Fast{} for j := range i.Columns { colName := string(i.Columns[j]) idx, ok := m.colMap[conv][colName] diff --git a/pkg/sql/insert_fast_path.go b/pkg/sql/insert_fast_path.go index 9bce529323ac..323fb90259a4 100644 --- a/pkg/sql/insert_fast_path.go +++ b/pkg/sql/insert_fast_path.go @@ -106,7 +106,7 @@ func (c *insertFastPathFKCheck) init(params runParams) error { codec := params.ExecCfg().Codec c.keyPrefix = rowenc.MakeIndexKeyPrefix(codec, c.tabDesc.GetID(), c.idx.GetID()) c.spanBuilder.Init(params.EvalContext(), codec, c.tabDesc, c.idx) - c.spanSplitter = span.MakeSplitter(c.tabDesc, c.idx, intsets.FastIntSet{} /* neededColOrdinals */) + c.spanSplitter = span.MakeSplitter(c.tabDesc, c.idx, intsets.Fast{} /* neededColOrdinals */) if len(c.InsertCols) > idx.numLaxKeyCols { return errors.AssertionFailedf( diff --git a/pkg/sql/instrumentation.go b/pkg/sql/instrumentation.go index cc1efa856ab7..a2304233b439 100644 --- a/pkg/sql/instrumentation.go +++ b/pkg/sql/instrumentation.go @@ -668,7 +668,7 @@ func (m execNodeTraceMetadata) annotateExplain( var nodeStats exec.ExecutionStats incomplete := false - var nodes intsets.FastIntSet + var nodes intsets.Fast regionsMap := 
make(map[string]struct{}) for _, c := range components { if c.Type == execinfrapb.ComponentID_PROCESSOR { diff --git a/pkg/sql/opt/colset.go b/pkg/sql/opt/colset.go index cd8119e94e0b..d6894143dba4 100644 --- a/pkg/sql/opt/colset.go +++ b/pkg/sql/opt/colset.go @@ -18,10 +18,10 @@ import ( // ColSet efficiently stores an unordered set of column ids. type ColSet struct { - set intsets.FastIntSet + set intsets.Fast } -// We offset the ColumnIDs in the underlying FastIntSet by 1, so that the +// We offset the ColumnIDs in the underlying Fast by 1, so that the // internal set fast-path can be used for ColumnIDs in the range [1, 64] instead // of [0, 63]. ColumnID 0 is reserved as an unknown ColumnID, and a ColSet // should never contain it, so this shift allows us to make use of the set @@ -116,7 +116,7 @@ func (s ColSet) SubsetOf(rhs ColSet) bool { return s.set.SubsetOf(rhs.set) } // numbers are shown as ranges. For example, for the set {1, 2, 3 5, 6, 10}, // the output is "(1-3,5,6,10)". func (s ColSet) String() string { - var noOffset intsets.FastIntSet + var noOffset intsets.Fast s.ForEach(func(col ColumnID) { noOffset.Add(int(col)) }) diff --git a/pkg/sql/opt/colset_test.go b/pkg/sql/opt/colset_test.go index 355668f648d6..f451df48ca17 100644 --- a/pkg/sql/opt/colset_test.go +++ b/pkg/sql/opt/colset_test.go @@ -22,7 +22,7 @@ func BenchmarkColSet(b *testing.B) { const n = 50 b.Run("fastintset", func(b *testing.B) { for i := 0; i < b.N; i++ { - var c intsets.FastIntSet + var c intsets.Fast for j := 1; j <= n; j++ { c.Add(j) } diff --git a/pkg/sql/opt/constraint/constraint_test.go b/pkg/sql/opt/constraint/constraint_test.go index 6188bec2a1e3..2b54f95abc32 100644 --- a/pkg/sql/opt/constraint/constraint_test.go +++ b/pkg/sql/opt/constraint/constraint_test.go @@ -596,7 +596,7 @@ func TestConsolidateLocalAndRemoteSpans(t *testing.T) { // only has the partitions and ps (PrefixSorter) elements populated. 
partitionSpans := parseSpans(&evalCtx, tc.partitionSpans) partitions := make([]testcat.Partition, partitionSpans.Count()) - localPartitions := intsets.FastIntSet{} + localPartitions := intsets.Fast{} for j := 0; j < partitionSpans.Count(); j++ { span := partitionSpans.Get(j) spanDatums := make([]tree.Datums, 1) diff --git a/pkg/sql/opt/doc.go b/pkg/sql/opt/doc.go index bdb69b8f7712..169db575e7de 100644 --- a/pkg/sql/opt/doc.go +++ b/pkg/sql/opt/doc.go @@ -436,7 +436,7 @@ is unique across the query. Column numbering involves assigning every base column and non-trivial projection in a query a unique, query-specific index. Giving each column a unique index allows the expression nodes mentioned above to track input and output columns, or really any set of columns during Prep and -later phases, using a bitmap (FastIntSet). The bitmap representation allows +later phases, using a bitmap (intsets.Fast). The bitmap representation allows fast determination of compatibility between expression nodes and is utilized by transforms to determine the legality of such operations. diff --git a/pkg/sql/opt/exec/execbuilder/mutation.go b/pkg/sql/opt/exec/execbuilder/mutation.go index 6c3d99429d15..788e35d46c80 100644 --- a/pkg/sql/opt/exec/execbuilder/mutation.go +++ b/pkg/sql/opt/exec/execbuilder/mutation.go @@ -652,8 +652,8 @@ func appendColsWhenPresent(dst opt.ColList, src opt.OptionalColList) opt.ColList // column ID in the given list. This is used with mutation operators, which // maintain lists that correspond to the target table, with zero column IDs // indicating columns that are not involved in the mutation. 
-func ordinalSetFromColList(colList opt.OptionalColList) intsets.FastIntSet { - var res intsets.FastIntSet +func ordinalSetFromColList(colList opt.OptionalColList) intsets.Fast { + var res intsets.Fast for i, col := range colList { if col != 0 { res.Add(i) diff --git a/pkg/sql/opt/exec/explain/plan_gist_factory.go b/pkg/sql/opt/exec/explain/plan_gist_factory.go index f32015a6d1f7..7579e5e0f10b 100644 --- a/pkg/sql/opt/exec/explain/plan_gist_factory.go +++ b/pkg/sql/opt/exec/explain/plan_gist_factory.go @@ -370,7 +370,7 @@ func (f *PlanGistFactory) decodeBool() bool { return val != 0 } -func (f *PlanGistFactory) encodeFastIntSet(s intsets.FastIntSet) { +func (f *PlanGistFactory) encodeFastIntSet(s intsets.Fast) { lenBefore := f.buffer.Len() if err := s.Encode(&f.buffer); err != nil { panic(err) @@ -415,7 +415,7 @@ func (f *PlanGistFactory) encodeScanParams(params exec.ScanParams) { } func (f *PlanGistFactory) decodeScanParams() exec.ScanParams { - neededCols := intsets.FastIntSet{} + neededCols := intsets.Fast{} err := neededCols.Decode(&f.buffer) if err != nil { panic(err) diff --git a/pkg/sql/opt/exec/factory.go b/pkg/sql/opt/exec/factory.go index 3b2448700eab..79e3e3d287ea 100644 --- a/pkg/sql/opt/exec/factory.go +++ b/pkg/sql/opt/exec/factory.go @@ -125,18 +125,18 @@ const ( type TableColumnOrdinal int32 // TableColumnOrdinalSet contains a set of TableColumnOrdinal values. -type TableColumnOrdinalSet = intsets.FastIntSet +type TableColumnOrdinalSet = intsets.Fast // NodeColumnOrdinal is the 0-based ordinal index of a column produced by a // Node. It is used when referring to a column in an input to an operator. type NodeColumnOrdinal int32 // NodeColumnOrdinalSet contains a set of NodeColumnOrdinal values. -type NodeColumnOrdinalSet = intsets.FastIntSet +type NodeColumnOrdinalSet = intsets.Fast // CheckOrdinalSet contains the ordinal positions of a set of check constraints // taken from the opt.Table.Check collection. 
-type CheckOrdinalSet = intsets.FastIntSet +type CheckOrdinalSet = intsets.Fast // AggInfo represents an aggregation (see ConstructGroupBy). type AggInfo struct { diff --git a/pkg/sql/opt/indexrec/candidate.go b/pkg/sql/opt/indexrec/candidate.go index a3afc3dfbcf5..d94b5f265476 100644 --- a/pkg/sql/opt/indexrec/candidate.go +++ b/pkg/sql/opt/indexrec/candidate.go @@ -281,7 +281,7 @@ func constructLeftIndexCombination( rightIndexes [][]cat.IndexColumn, outputIndexes map[cat.Table][][]cat.IndexColumn, ) { - var leftIndexColSet intsets.FastIntSet + var leftIndexColSet intsets.Fast // Store left columns in a set for fast access. for _, leftCol := range leftIndex { leftIndexColSet.Add(int(leftCol.ColID())) diff --git a/pkg/sql/opt/indexrec/hypothetical_index.go b/pkg/sql/opt/indexrec/hypothetical_index.go index 03745bb7ded4..ef2f5eb0b836 100644 --- a/pkg/sql/opt/indexrec/hypothetical_index.go +++ b/pkg/sql/opt/indexrec/hypothetical_index.go @@ -67,7 +67,7 @@ func (hi *hypotheticalIndex) init( hi.zone = zone // Build an index column ordinal set. - var colsOrdSet intsets.FastIntSet + var colsOrdSet intsets.Fast for _, col := range hi.cols { colsOrdSet.Add(col.Ordinal()) } diff --git a/pkg/sql/opt/indexrec/hypothetical_table.go b/pkg/sql/opt/indexrec/hypothetical_table.go index 7704ef711a96..d234577fab93 100644 --- a/pkg/sql/opt/indexrec/hypothetical_table.go +++ b/pkg/sql/opt/indexrec/hypothetical_table.go @@ -79,7 +79,7 @@ func BuildOptAndHypTableMaps( type HypotheticalTable struct { cat.Table invertedCols []*cat.Column - primaryKeyColsOrdSet intsets.FastIntSet + primaryKeyColsOrdSet intsets.Fast hypotheticalIndexes []hypotheticalIndex } diff --git a/pkg/sql/opt/indexrec/rec.go b/pkg/sql/opt/indexrec/rec.go index 3c755a2fd6b0..b8bb46d43ce0 100644 --- a/pkg/sql/opt/indexrec/rec.go +++ b/pkg/sql/opt/indexrec/rec.go @@ -94,8 +94,8 @@ func (rc recCollector) addIndexRec(md *opt.Metadata, expr opt.Expr) { } // getStoredCols returns stored columns of the given existingIndex. 
-func getStoredCols(existingIndex cat.Index) intsets.FastIntSet { - var existingStoredOrds intsets.FastIntSet +func getStoredCols(existingIndex cat.Index) intsets.Fast { + var existingStoredOrds intsets.Fast for i, n := existingIndex.KeyColumnCount(), existingIndex.ColumnCount(); i < n; i++ { existingStoredOrds.Add(existingIndex.Column(i).Ordinal()) } @@ -104,8 +104,8 @@ func getStoredCols(existingIndex cat.Index) intsets.FastIntSet { // getAllCols returns columns of the given existingIndex including in the // explicit columns and STORING clause. -func getAllCols(existingIndex cat.Index) intsets.FastIntSet { - var existingAllOrds intsets.FastIntSet +func getAllCols(existingIndex cat.Index) intsets.Fast { + var existingAllOrds intsets.Fast for i, n := 0, existingIndex.ColumnCount(); i < n; i++ { existingAllOrds.Add(existingIndex.Column(i).Ordinal()) } @@ -136,13 +136,12 @@ func getAllCols(existingIndex cat.Index) intsets.FastIntSet { // candidate for existing index, and its already stored columns. If not found, // this means that there does not exist an index that satisfy the requirement to // be a candidate. So no existing indexes can be replaced, and creating a new -// index is necessary. It returns TypeCreateIndex, nil, and -// intsets.FastIntSet{}. If there is a candidate that stores every column from -// actuallyScannedCols, typeUseless, nil, {} is returned. Theoretically, this -// should never happen. +// index is necessary. It returns TypeCreateIndex, nil, and an empty intsets.Fast. If +// there is a candidate that stores every column from actuallyScannedCols, +// typeUseless, nil, {} is returned. Theoretically, this should never happen.
func findBestExistingIndexToReplace( - table cat.Table, hypIndex *hypotheticalIndex, actuallyScannedCols intsets.FastIntSet, -) (Type, cat.Index, intsets.FastIntSet) { + table cat.Table, hypIndex *hypotheticalIndex, actuallyScannedCols intsets.Fast, +) (Type, cat.Index, intsets.Fast) { // To find the existing index with most columns in actuallyScannedCol, we keep // track of the best candidate for existing index and its stored columns. @@ -158,7 +157,7 @@ func findBestExistingIndexToReplace( // actuallyScannedCol). minColsDiff := actuallyScannedCols.Len() var existingIndexCandidate cat.Index - var existingIndexCandidateStoredCol intsets.FastIntSet + var existingIndexCandidateStoredCol intsets.Fast for i, n := 0, table.IndexCount(); i < n; i++ { // Iterate through every existing index in the table. @@ -179,7 +178,7 @@ func findBestExistingIndexToReplace( // SELECT a FROM t WHERE b > 0, hypIndex(a), actuallyScannedCol b. // invisible_idx(a, b) could still be used. Creating a new index with // idx(a) STORING b is unnecessary. - return TypeAlterIndex, existingIndex, intsets.FastIntSet{} + return TypeAlterIndex, existingIndex, intsets.Fast{} } // Skip any invisible indexes. continue @@ -207,7 +206,7 @@ func findBestExistingIndexToReplace( // scanned cols is neither included in explicit columns nor stored. // Otherwise, the optimizer should use the existing index, and no index // recommendation should be constructed. - return typeUseless, nil, intsets.FastIntSet{} + return typeUseless, nil, intsets.Fast{} } else if existingIndexCandidate == nil || storedColsDiffSet.Len() < minColsDiff { // Otherwise, storedColsDiffSet is non-empty. The existing index is // missing some columns in actuallyScannedCol. If no candidate has been @@ -223,7 +222,7 @@ func findBestExistingIndexToReplace( if existingIndexCandidate == nil { // There doesn't exist an index with same explicit columns as hypIndex. // Recommend index creation. 
- return TypeCreateIndex, nil, intsets.FastIntSet{} + return TypeCreateIndex, nil, intsets.Fast{} } return TypeReplaceIndex, existingIndexCandidate, existingIndexCandidateStoredCol @@ -320,8 +319,8 @@ func (rc recCollector) outputIndexRec() []Rec { // getColOrdSet returns the set of column ordinals within the given table that // are contained in cols. -func getColOrdSet(md *opt.Metadata, cols opt.ColSet, tabID opt.TableID) intsets.FastIntSet { - var colsOrdSet intsets.FastIntSet +func getColOrdSet(md *opt.Metadata, cols opt.ColSet, tabID opt.TableID) intsets.Fast { + var colsOrdSet intsets.Fast cols.ForEach(func(col opt.ColumnID) { table := md.ColumnMeta(col).Table // Do not add columns from other tables. @@ -401,13 +400,13 @@ type indexRecommendation struct { // newStoredColOrds stores the stored column ordinals that are scanned by the // optimizer in the expression tree passed to FindRecs. - newStoredColOrds intsets.FastIntSet + newStoredColOrds intsets.Fast } // init initializes an index recommendation. If there is an existingIndex with // the same explicit columns, it is stored here. func (ir *indexRecommendation) init( - indexOrd int, hypTable *HypotheticalTable, scannedColOrds intsets.FastIntSet, + indexOrd int, hypTable *HypotheticalTable, scannedColOrds intsets.Fast, ) { index := hypTable.Index(indexOrd).(*hypotheticalIndex) ir.index = index @@ -422,7 +421,7 @@ func (ir *indexRecommendation) init( // addStoredColOrds updates an index recommendation's newStoredColOrds field to // also contain the scannedColOrds columns. 
-func (ir *indexRecommendation) addStoredColOrds(scannedColOrds intsets.FastIntSet) { +func (ir *indexRecommendation) addStoredColOrds(scannedColOrds intsets.Fast) { for i := range ir.index.storedCols { colOrd := ir.index.storedCols[i].Column.Ordinal() if scannedColOrds.Contains(colOrd) { diff --git a/pkg/sql/opt/lookupjoin/constraint_builder.go b/pkg/sql/opt/lookupjoin/constraint_builder.go index 12a3a70ae250..966fb1137021 100644 --- a/pkg/sql/opt/lookupjoin/constraint_builder.go +++ b/pkg/sql/opt/lookupjoin/constraint_builder.go @@ -216,7 +216,7 @@ func (b *ConstraintBuilder) Build( var inputProjections memo.ProjectionsExpr var lookupExpr memo.FiltersExpr var constFilters memo.FiltersExpr - var filterOrdsToExclude intsets.FastIntSet + var filterOrdsToExclude intsets.Fast foundLookupCols := false lookupExprRequired := false remainingFilters := make(memo.FiltersExpr, 0, len(onFilters)) diff --git a/pkg/sql/opt/memo/expr.go b/pkg/sql/opt/memo/expr.go index 77452b194cf1..432495144aff 100644 --- a/pkg/sql/opt/memo/expr.go +++ b/pkg/sql/opt/memo/expr.go @@ -422,7 +422,7 @@ type ScanFlags struct { // ZigzagIndexes makes planner prefer a zigzag with particular indexes. // ForceZigzag must also be true. - ZigzagIndexes intsets.FastIntSet + ZigzagIndexes intsets.Fast } // Empty returns true if there are no flags set. diff --git a/pkg/sql/opt/memo/extract.go b/pkg/sql/opt/memo/extract.go index f0120b4752af..b23410562838 100644 --- a/pkg/sql/opt/memo/extract.go +++ b/pkg/sql/opt/memo/extract.go @@ -177,7 +177,7 @@ func HasJoinCondition(leftCols, rightCols opt.ColSet, on FiltersExpr, inequality // equalities. 
func ExtractJoinConditionFilterOrds( leftCols, rightCols opt.ColSet, on FiltersExpr, inequality bool, -) (filterOrds intsets.FastIntSet) { +) (filterOrds intsets.Fast) { var seenCols opt.ColSet for i := range on { condition := on[i].Condition diff --git a/pkg/sql/opt/memo/interner_test.go b/pkg/sql/opt/memo/interner_test.go index c7fb393ccef1..ec44319fe4a0 100644 --- a/pkg/sql/opt/memo/interner_test.go +++ b/pkg/sql/opt/memo/interner_test.go @@ -378,7 +378,7 @@ func TestInterner(t *testing.T) { {hashFn: in.hasher.HashScanFlags, eqFn: in.hasher.IsScanFlagsEqual, variations: []testVariation{ // Use unnamed fields so that compilation fails if a new field is // added to ScanFlags. - {val1: ScanFlags{false, false, false, false, false, 0, 0, false, intsets.FastIntSet{}}, val2: ScanFlags{}, equal: true}, + {val1: ScanFlags{false, false, false, false, false, 0, 0, false, intsets.Fast{}}, val2: ScanFlags{}, equal: true}, {val1: ScanFlags{}, val2: ScanFlags{}, equal: true}, {val1: ScanFlags{NoIndexJoin: false}, val2: ScanFlags{NoIndexJoin: true}, equal: false}, {val1: ScanFlags{NoIndexJoin: true}, val2: ScanFlags{NoIndexJoin: true}, equal: true}, @@ -462,9 +462,9 @@ func TestInterner(t *testing.T) { }}, {hashFn: in.hasher.HashSchemaTypeDeps, eqFn: in.hasher.IsSchemaTypeDepsEqual, variations: []testVariation{ - {val1: intsets.MakeFastIntSet(), val2: intsets.MakeFastIntSet(), equal: true}, - {val1: intsets.MakeFastIntSet(1, 2, 3), val2: intsets.MakeFastIntSet(3, 2, 1), equal: true}, - {val1: intsets.MakeFastIntSet(1, 2, 3), val2: intsets.MakeFastIntSet(1, 2), equal: false}, + {val1: intsets.MakeFast(), val2: intsets.MakeFast(), equal: true}, + {val1: intsets.MakeFast(1, 2, 3), val2: intsets.MakeFast(3, 2, 1), equal: true}, + {val1: intsets.MakeFast(1, 2, 3), val2: intsets.MakeFast(1, 2), equal: false}, }}, {hashFn: in.hasher.HashWindowFrame, eqFn: in.hasher.IsWindowFrameEqual, variations: []testVariation{ diff --git a/pkg/sql/opt/metadata.go b/pkg/sql/opt/metadata.go index 
5dafce1dd3dc..1a0b72220069 100644 --- a/pkg/sql/opt/metadata.go +++ b/pkg/sql/opt/metadata.go @@ -722,7 +722,7 @@ func (md *Metadata) AllViews() []cat.View { func (md *Metadata) getAllReferencedTables( ctx context.Context, catalog cat.Catalog, ) []cat.DataSource { - var tableSet intsets.FastIntSet + var tableSet intsets.Fast var tableList []cat.DataSource var addForeignKeyReferencedTables func(tab cat.Table) addForeignKeyReferencedTables = func(tab cat.Table) { diff --git a/pkg/sql/opt/norm/factory.go b/pkg/sql/opt/norm/factory.go index 7e22fb0d1a60..594740a474a6 100644 --- a/pkg/sql/opt/norm/factory.go +++ b/pkg/sql/opt/norm/factory.go @@ -102,7 +102,7 @@ type Factory struct { // disabledRules is a set of rules that are not allowed to run, used when // rules are disabled during testing to prevent rule cycles. - disabledRules intsets.FastIntSet + disabledRules intsets.Fast } // maxConstructorStackDepth is the maximum allowed depth of a constructor call @@ -217,7 +217,7 @@ func (f *Factory) NotifyOnAppliedRule(appliedRule AppliedRuleFunc) { // disabled during testing. SetDisabledRules does not prevent rules from // matching - rather, it notifies the Factory that rules have been prevented // from matching using NotifyOnMatchedRule. -func (f *Factory) SetDisabledRules(disabledRules intsets.FastIntSet) { +func (f *Factory) SetDisabledRules(disabledRules intsets.Fast) { f.disabledRules = disabledRules } diff --git a/pkg/sql/opt/norm/inline_funcs.go b/pkg/sql/opt/norm/inline_funcs.go index 309b84925de1..0dfba69a3617 100644 --- a/pkg/sql/opt/norm/inline_funcs.go +++ b/pkg/sql/opt/norm/inline_funcs.go @@ -317,7 +317,7 @@ func (c *CustomFuncs) extractVarEqualsConst( func (c *CustomFuncs) CanInlineConstVar(f memo.FiltersExpr) bool { // usedIndices tracks the set of filter indices we've used to infer constant // values, so we don't inline into them. 
- var usedIndices intsets.FastIntSet + var usedIndices intsets.Fast // fixedCols is the set of columns that the filters restrict to be a constant // value. var fixedCols opt.ColSet @@ -350,7 +350,7 @@ func (c *CustomFuncs) CanInlineConstVar(f memo.FiltersExpr) bool { func (c *CustomFuncs) InlineConstVar(f memo.FiltersExpr) memo.FiltersExpr { // usedIndices tracks the set of filter indices we've used to infer constant // values, so we don't inline into them. - var usedIndices intsets.FastIntSet + var usedIndices intsets.Fast // fixedCols is the set of columns that the filters restrict to be a constant // value. var fixedCols opt.ColSet diff --git a/pkg/sql/opt/norm/project_funcs.go b/pkg/sql/opt/norm/project_funcs.go index 840b3fc25607..ae32774e7324 100644 --- a/pkg/sql/opt/norm/project_funcs.go +++ b/pkg/sql/opt/norm/project_funcs.go @@ -754,7 +754,7 @@ func (c *CustomFuncs) PushAssignmentCastsIntoValues( // will map a new column produced by the new values expression to their // output column. castOrds tracks the column ordinals in the values // expression to push assignment casts down to. - var castOrds intsets.FastIntSet + var castOrds intsets.Fast newProjections := make(memo.ProjectionsExpr, 0, len(projections)) for i := range projections { col, targetType, ok := extractAssignmentCastInputColAndTargetType(projections[i].Element) diff --git a/pkg/sql/opt/norm/prune_cols_funcs.go b/pkg/sql/opt/norm/prune_cols_funcs.go index 1e2f82d6c03b..d52b140b9a5b 100644 --- a/pkg/sql/opt/norm/prune_cols_funcs.go +++ b/pkg/sql/opt/norm/prune_cols_funcs.go @@ -505,7 +505,7 @@ func (c *CustomFuncs) PruneWindows(needed opt.ColSet, windows memo.WindowsExpr) // are randomly disabled for testing. It is used to prevent propagating the // PruneCols property when the corresponding column-pruning normalization rule // is disabled. This prevents rule cycles during testing. 
-func DerivePruneCols(e memo.RelExpr, disabledRules intsets.FastIntSet) opt.ColSet { +func DerivePruneCols(e memo.RelExpr, disabledRules intsets.Fast) opt.ColSet { relProps := e.Relational() if relProps.IsAvailable(props.PruneCols) { return relProps.Rule.PruneCols diff --git a/pkg/sql/opt/norm/reject_nulls_funcs.go b/pkg/sql/opt/norm/reject_nulls_funcs.go index b7ee010cae5f..a368ea46246d 100644 --- a/pkg/sql/opt/norm/reject_nulls_funcs.go +++ b/pkg/sql/opt/norm/reject_nulls_funcs.go @@ -124,7 +124,7 @@ func (c *CustomFuncs) NullRejectProjections( // are randomly disabled for testing. It is used to prevent propagating the // RejectNullCols property when the corresponding column-pruning normalization // rule is disabled. This prevents rule cycles during testing. -func DeriveRejectNullCols(in memo.RelExpr, disabledRules intsets.FastIntSet) opt.ColSet { +func DeriveRejectNullCols(in memo.RelExpr, disabledRules intsets.Fast) opt.ColSet { // Lazily calculate and store the RejectNullCols value. relProps := in.Relational() if relProps.IsAvailable(props.RejectNullCols) { @@ -231,7 +231,7 @@ func DeriveRejectNullCols(in memo.RelExpr, disabledRules intsets.FastIntSet) opt // 2. The aggregate function returns null if its input is empty. And since // by #1, the presence of nulls does not alter the result, the aggregate // function would return null if its input contains only null values. -func deriveGroupByRejectNullCols(in memo.RelExpr, disabledRules intsets.FastIntSet) opt.ColSet { +func deriveGroupByRejectNullCols(in memo.RelExpr, disabledRules intsets.Fast) opt.ColSet { input := in.Child(0).(memo.RelExpr) aggs := *in.Child(1).(*memo.AggregationsExpr) @@ -310,7 +310,7 @@ func (c *CustomFuncs) MakeNullRejectFilters(nullRejectCols opt.ColSet) memo.Filt // // 1. The projection "transmits" nulls - it returns NULL when one or more of // its inputs is NULL. 
-func deriveProjectRejectNullCols(in memo.RelExpr, disabledRules intsets.FastIntSet) opt.ColSet { +func deriveProjectRejectNullCols(in memo.RelExpr, disabledRules intsets.Fast) opt.ColSet { rejectNullCols := DeriveRejectNullCols(in.Child(0).(memo.RelExpr), disabledRules) projections := *in.Child(1).(*memo.ProjectionsExpr) var projectionsRejectCols opt.ColSet diff --git a/pkg/sql/opt/optbuilder/arbiter_set.go b/pkg/sql/opt/optbuilder/arbiter_set.go index fc96af5d388c..8c3ffd40da76 100644 --- a/pkg/sql/opt/optbuilder/arbiter_set.go +++ b/pkg/sql/opt/optbuilder/arbiter_set.go @@ -26,11 +26,11 @@ type arbiterSet struct { // indexes contains the index arbiters in the set, as ordinals into the // table's indexes. - indexes intsets.FastIntSet + indexes intsets.Fast // uniqueConstraints contains the unique constraint arbiters in the set, as // ordinals into the table's unique constraints. - uniqueConstraints intsets.FastIntSet + uniqueConstraints intsets.Fast } // makeArbiterSet returns an initialized arbiterSet. @@ -106,7 +106,7 @@ func (a *arbiterSet) ContainsUniqueConstraint(uniq cat.UniqueOrdinal) bool { // - canaryOrd is the table column ordinal of a not-null column in the // constraint's table. func (a *arbiterSet) ForEach( - f func(name string, conflictOrds intsets.FastIntSet, pred tree.Expr, canaryOrd int), + f func(name string, conflictOrds intsets.Fast, pred tree.Expr, canaryOrd int), ) { // Call the callback for each index arbiter. a.indexes.ForEach(func(i int) { @@ -187,7 +187,7 @@ type minArbiterSet struct { // indexConflictOrdsCache caches the conflict column sets of arbiter indexes // in the set. - indexConflictOrdsCache map[cat.IndexOrdinal]intsets.FastIntSet + indexConflictOrdsCache map[cat.IndexOrdinal]intsets.Fast } // makeMinArbiterSet returns an initialized arbiterSet. @@ -258,7 +258,7 @@ func (m *minArbiterSet) initCache() { return } // Cache each index's conflict columns. 
- m.indexConflictOrdsCache = make(map[cat.IndexOrdinal]intsets.FastIntSet, m.as.indexes.Len()) + m.indexConflictOrdsCache = make(map[cat.IndexOrdinal]intsets.Fast, m.as.indexes.Len()) m.as.indexes.ForEach(func(i int) { index := m.as.mb.tab.Index(i) m.indexConflictOrdsCache[i] = getIndexLaxKeyOrdinals(index) diff --git a/pkg/sql/opt/optbuilder/create_function.go b/pkg/sql/opt/optbuilder/create_function.go index e71d9b1fad6b..0d1418cd7497 100644 --- a/pkg/sql/opt/optbuilder/create_function.go +++ b/pkg/sql/opt/optbuilder/create_function.go @@ -53,7 +53,7 @@ func (b *Builder) buildCreateFunction(cf *tree.CreateFunction, inScope *scope) ( b.insideFuncDef = false b.trackSchemaDeps = false b.schemaDeps = nil - b.schemaTypeDeps = intsets.FastIntSet{} + b.schemaTypeDeps = intsets.Fast{} b.qualifyDataSourceNamesInAST = false b.semaCtx.FunctionResolver = preFuncResolver @@ -164,7 +164,7 @@ func (b *Builder) buildCreateFunction(cf *tree.CreateFunction, inScope *scope) ( typeDeps.UnionWith(b.schemaTypeDeps) // Reset the tracked dependencies for next statement. b.schemaDeps = nil - b.schemaTypeDeps = intsets.FastIntSet{} + b.schemaTypeDeps = intsets.Fast{} } // Override the function body so that references are fully qualified. 
diff --git a/pkg/sql/opt/optbuilder/create_view.go b/pkg/sql/opt/optbuilder/create_view.go index 200c599ced56..b3239331a54d 100644 --- a/pkg/sql/opt/optbuilder/create_view.go +++ b/pkg/sql/opt/optbuilder/create_view.go @@ -42,7 +42,7 @@ func (b *Builder) buildCreateView(cv *tree.CreateView, inScope *scope) (outScope b.insideViewDef = false b.trackSchemaDeps = false b.schemaDeps = nil - b.schemaTypeDeps = intsets.FastIntSet{} + b.schemaTypeDeps = intsets.Fast{} b.qualifyDataSourceNamesInAST = false b.semaCtx.FunctionResolver = preFuncResolver diff --git a/pkg/sql/opt/optbuilder/fk_cascade.go b/pkg/sql/opt/optbuilder/fk_cascade.go index edb0a85aaba8..313e098e8cca 100644 --- a/pkg/sql/opt/optbuilder/fk_cascade.go +++ b/pkg/sql/opt/optbuilder/fk_cascade.go @@ -207,7 +207,7 @@ func tryNewOnDeleteFastCascadeBuilder( return nil, false } - var visited intsets.FastIntSet + var visited intsets.Fast parentTabID := parentTab.ID() childTabID := childTab.ID() diff --git a/pkg/sql/opt/optbuilder/insert.go b/pkg/sql/opt/optbuilder/insert.go index 479a500550ff..5951ab72482b 100644 --- a/pkg/sql/opt/optbuilder/insert.go +++ b/pkg/sql/opt/optbuilder/insert.go @@ -707,14 +707,14 @@ func (mb *mutationBuilder) buildInputForDoNothing(inScope *scope, onConflict *tr mb.outScope.ordering = nil // Create an anti-join for each arbiter. - mb.arbiters.ForEach(func(name string, conflictOrds intsets.FastIntSet, pred tree.Expr, canaryOrd int) { + mb.arbiters.ForEach(func(name string, conflictOrds intsets.Fast, pred tree.Expr, canaryOrd int) { mb.buildAntiJoinForDoNothingArbiter(inScope, conflictOrds, pred) }) // Create an UpsertDistinctOn for each arbiter. This must happen after all // conflicting rows are removed with the anti-joins created above, to avoid // removing valid rows (see #59125). 
- mb.arbiters.ForEach(func(name string, conflictOrds intsets.FastIntSet, pred tree.Expr, canaryOrd int) { + mb.arbiters.ForEach(func(name string, conflictOrds intsets.Fast, pred tree.Expr, canaryOrd int) { // If the arbiter has a partial predicate, project a new column that // allows the UpsertDistinctOn to only de-duplicate insert rows that // satisfy the predicate. See projectPartialArbiterDistinctColumn for @@ -760,7 +760,7 @@ func (mb *mutationBuilder) buildInputForUpsert( // Create an UpsertDistinctOn and a left-join for the single arbiter. var canaryCol *scopeColumn - mb.arbiters.ForEach(func(name string, conflictOrds intsets.FastIntSet, pred tree.Expr, canaryOrd int) { + mb.arbiters.ForEach(func(name string, conflictOrds intsets.Fast, pred tree.Expr, canaryOrd int) { // If the arbiter has a partial predicate, project a new column that // allows the UpsertDistinctOn to only de-duplicate insert rows that // satisfy the predicate. See projectPartialArbiterDistinctColumn for diff --git a/pkg/sql/opt/optbuilder/join.go b/pkg/sql/opt/optbuilder/join.go index 577b8fab4a94..be883ffd63a6 100644 --- a/pkg/sql/opt/optbuilder/join.go +++ b/pkg/sql/opt/optbuilder/join.go @@ -171,12 +171,12 @@ func (b *Builder) validateJoinTableNames(leftScope, rightScope *scope) { } } -// findJoinColsToValidate creates a FastIntSet containing the ordinal of each +// findJoinColsToValidate creates a Fast containing the ordinal of each // column that has a different table name than the previous column. This is a // fast way of reducing the set of columns that need to checked for duplicate // names by validateJoinTableNames. -func (b *Builder) findJoinColsToValidate(scope *scope) intsets.FastIntSet { - var ords intsets.FastIntSet +func (b *Builder) findJoinColsToValidate(scope *scope) intsets.Fast { + var ords intsets.Fast for i := range scope.cols { // Allow joins of sources that define columns with no // associated table name. 
At worst, the USING/NATURAL diff --git a/pkg/sql/opt/optbuilder/mutation_builder.go b/pkg/sql/opt/optbuilder/mutation_builder.go index da26468d75ee..5b152eed1da8 100644 --- a/pkg/sql/opt/optbuilder/mutation_builder.go +++ b/pkg/sql/opt/optbuilder/mutation_builder.go @@ -1214,8 +1214,8 @@ func (mb *mutationBuilder) parseUniqueConstraintPredicateExpr(uniq cat.UniqueOrd // getIndexLaxKeyOrdinals returns the ordinals of all lax key columns in the // given index. A column's ordinal is the ordered position of that column in the // owning table. -func getIndexLaxKeyOrdinals(index cat.Index) intsets.FastIntSet { - var keyOrds intsets.FastIntSet +func getIndexLaxKeyOrdinals(index cat.Index) intsets.Fast { + var keyOrds intsets.Fast for i, n := 0, index.LaxKeyColumnCount(); i < n; i++ { keyOrds.Add(index.Column(i).Ordinal()) } @@ -1225,8 +1225,8 @@ func getIndexLaxKeyOrdinals(index cat.Index) intsets.FastIntSet { // getUniqueConstraintOrdinals returns the ordinals of all columns in the given // unique constraint. A column's ordinal is the ordered position of that column // in the owning table. -func getUniqueConstraintOrdinals(tab cat.Table, uc cat.UniqueConstraint) intsets.FastIntSet { - var ucOrds intsets.FastIntSet +func getUniqueConstraintOrdinals(tab cat.Table, uc cat.UniqueConstraint) intsets.Fast { + var ucOrds intsets.Fast for i, n := 0, uc.ColumnCount(); i < n; i++ { ucOrds.Add(uc.ColumnOrdinal(tab, i)) } @@ -1236,10 +1236,10 @@ func getUniqueConstraintOrdinals(tab cat.Table, uc cat.UniqueConstraint) intsets // getExplicitPrimaryKeyOrdinals returns the ordinals of the primary key // columns, excluding any implicit partitioning or hash-shard columns in the // primary index. 
-func getExplicitPrimaryKeyOrdinals(tab cat.Table) intsets.FastIntSet { +func getExplicitPrimaryKeyOrdinals(tab cat.Table) intsets.Fast { index := tab.Index(cat.PrimaryIndex) skipCols := index.ImplicitColumnCount() - var keyOrds intsets.FastIntSet + var keyOrds intsets.Fast for i, n := skipCols, index.LaxKeyColumnCount(); i < n; i++ { keyOrds.Add(index.Column(i).Ordinal()) } diff --git a/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go b/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go index 6b7e14fa26a7..88ac7980c065 100644 --- a/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go +++ b/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go @@ -92,7 +92,7 @@ func (mb *mutationBuilder) findArbiters(onConflict *tree.OnConflict) arbiterSet )) } // We have to infer an arbiter set. - var ords intsets.FastIntSet + var ords intsets.Fast for _, name := range onConflict.Columns { found := false for i, n := 0, mb.tab.ColumnCount(); i < n; i++ { @@ -151,7 +151,7 @@ func partialIndexArbiterError(onConflict *tree.OnConflict, tableName tree.Name) // found. // 3. Otherwise, returns all partial arbiter indexes and constraints. func (mb *mutationBuilder) inferArbitersFromConflictOrds( - conflictOrds intsets.FastIntSet, arbiterPredicate tree.Expr, + conflictOrds intsets.Fast, arbiterPredicate tree.Expr, ) arbiterSet { // If conflictOrds is empty, then all unique indexes and unique without // index constraints are arbiters. @@ -277,7 +277,7 @@ func (mb *mutationBuilder) inferArbitersFromConflictOrds( // - pred is the partial index or constraint predicate. If the arbiter is // not a partial index or constraint, pred is nil. func (mb *mutationBuilder) buildAntiJoinForDoNothingArbiter( - inScope *scope, conflictOrds intsets.FastIntSet, pred tree.Expr, + inScope *scope, conflictOrds intsets.Fast, pred tree.Expr, ) { // Build the right side of the anti-join. 
Use a new metadata instance // of the mutation table so that a different set of column IDs are used for @@ -359,7 +359,7 @@ func (mb *mutationBuilder) buildAntiJoinForDoNothingArbiter( // only de-duplicate insert rows that satisfy the partial index predicate. // If the arbiter is not a partial index, partialIndexDistinctCol is nil. func (mb *mutationBuilder) buildLeftJoinForUpsertArbiter( - inScope *scope, conflictOrds intsets.FastIntSet, pred tree.Expr, + inScope *scope, conflictOrds intsets.Fast, pred tree.Expr, ) { // Build the right side of the left outer join. Use a different instance of // table metadata so that col IDs do not overlap. @@ -450,7 +450,7 @@ func (mb *mutationBuilder) buildLeftJoinForUpsertArbiter( // should trigger an error. If empty, no error is triggered. func (mb *mutationBuilder) buildDistinctOnForArbiter( insertColScope *scope, - conflictOrds intsets.FastIntSet, + conflictOrds intsets.Fast, partialArbiterDistinctCol *scopeColumn, errorOnDup string, ) { diff --git a/pkg/sql/opt/optbuilder/mutation_builder_unique.go b/pkg/sql/opt/optbuilder/mutation_builder_unique.go index aebba9c61a10..70b81f28860f 100644 --- a/pkg/sql/opt/optbuilder/mutation_builder_unique.go +++ b/pkg/sql/opt/optbuilder/mutation_builder_unique.go @@ -195,11 +195,11 @@ type uniqueCheckHelper struct { // uniqueOrdinals are the table ordinals of the unique columns in the table // that is being mutated. They correspond 1-to-1 to the columns in the // UniqueConstraint. - uniqueOrdinals intsets.FastIntSet + uniqueOrdinals intsets.Fast // primaryKeyOrdinals includes the ordinals from any primary key columns // that are not included in uniqueOrdinals. - primaryKeyOrdinals intsets.FastIntSet + primaryKeyOrdinals intsets.Fast // The scope and column ordinals of the scan that will serve as the right // side of the semi join for the uniqueness checks. 
@@ -220,7 +220,7 @@ func (h *uniqueCheckHelper) init(mb *mutationBuilder, uniqueOrdinal int) bool { uniqueOrdinal: uniqueOrdinal, } - var uniqueOrds intsets.FastIntSet + var uniqueOrds intsets.Fast for i, n := 0, h.unique.ColumnCount(); i < n; i++ { uniqueOrds.Add(h.unique.ColumnOrdinal(mb.tab, i)) } diff --git a/pkg/sql/opt/partialidx/implicator.go b/pkg/sql/opt/partialidx/implicator.go index 1904928a163f..4b0be97be5bd 100644 --- a/pkg/sql/opt/partialidx/implicator.go +++ b/pkg/sql/opt/partialidx/implicator.go @@ -208,7 +208,7 @@ func (im *Implicator) FiltersImplyPredicate( func (im *Implicator) filtersImplyPredicateFastPath( filters memo.FiltersExpr, pred memo.FiltersExpr, ) (remainingFilters memo.FiltersExpr, ok bool) { - var filtersToRemove intsets.FastIntSet + var filtersToRemove intsets.Fast // For every FiltersItem in pred, search for a matching FiltersItem in // filters. diff --git a/pkg/sql/opt/partition/locality.go b/pkg/sql/opt/partition/locality.go index 123c1b637167..30ebe4179194 100644 --- a/pkg/sql/opt/partition/locality.go +++ b/pkg/sql/opt/partition/locality.go @@ -63,7 +63,7 @@ type PrefixSorter struct { // The set of ordinal numbers indexing into the Entry slice, representing // which Prefixes (partitions) are 100% local to the gateway region - LocalPartitions intsets.FastIntSet + LocalPartitions intsets.Fast } // String returns a string representation of the PrefixSorter. @@ -150,13 +150,13 @@ func PrefixesToString(prefixes []Prefix) string { // determined. 
func HasMixOfLocalAndRemotePartitions( evalCtx *eval.Context, index cat.Index, -) (localPartitions intsets.FastIntSet, ok bool) { +) (localPartitions intsets.Fast, ok bool) { if index.PartitionCount() < 2 { - return intsets.FastIntSet{}, false + return intsets.Fast{}, false } var localRegion string if localRegion, ok = evalCtx.GetLocalRegion(); !ok { - return intsets.FastIntSet{}, false + return intsets.Fast{}, false } var foundLocal, foundRemote bool for i, n := 0, index.PartitionCount(); i < n; i++ { @@ -177,7 +177,7 @@ func HasMixOfLocalAndRemotePartitions( // group of equal-length prefixes they are ordered by value. // This is the main function for building a PrefixSorter. func GetSortedPrefixes( - index cat.Index, localPartitions intsets.FastIntSet, evalCtx *eval.Context, + index cat.Index, localPartitions intsets.Fast, evalCtx *eval.Context, ) PrefixSorter { if index == nil || index.PartitionCount() < 2 { return PrefixSorter{} diff --git a/pkg/sql/opt/partition/locality_test.go b/pkg/sql/opt/partition/locality_test.go index f8da0f221201..2f21377af5ca 100644 --- a/pkg/sql/opt/partition/locality_test.go +++ b/pkg/sql/opt/partition/locality_test.go @@ -93,7 +93,7 @@ func TestPrefixSorter(t *testing.T) { // only has the partitions and ps (PrefixSorter) elements populated. partKeys := parsePartitionKeys(&evalCtx, tc.partitionKeys) partitions := make([]testcat.Partition, len(partKeys)) - localPartitions := intsets.FastIntSet{} + localPartitions := intsets.Fast{} for j, partitionKey := range partKeys { partitionDatums := make([]tree.Datums, 1) partitionDatums[0] = partitionKey diff --git a/pkg/sql/opt/props/func_dep.go b/pkg/sql/opt/props/func_dep.go index 4ec8fc3d7c93..8c449a07151a 100644 --- a/pkg/sql/opt/props/func_dep.go +++ b/pkg/sql/opt/props/func_dep.go @@ -710,7 +710,7 @@ func (f *FuncDepSet) ComputeEquivClosure(cols opt.ColSet) opt.ColSet { // ComputeEquivClosureNoCopy is similar to ComputeEquivClosure, but computes the // closure in-place (e.g. 
the argument ColSet will be mutated). It should only // be used when it is ok to mutate the argument. This avoids allocations when -// columns overflow the small set of intsets.FastIntSet. +// columns overflow the small set of intsets.Fast. func (f *FuncDepSet) ComputeEquivClosureNoCopy(cols opt.ColSet) opt.ColSet { // Don't need to get transitive closure, because equivalence closures are // already maintained for every column. diff --git a/pkg/sql/opt/props/physical/distribution.go b/pkg/sql/opt/props/physical/distribution.go index f1108b5730c0..309f3671f5ee 100644 --- a/pkg/sql/opt/props/physical/distribution.go +++ b/pkg/sql/opt/props/physical/distribution.go @@ -34,7 +34,7 @@ type Distribution struct { // TODO(rytaft): Consider abstracting this to a list of "neighborhoods" to // support more different types of localities. // TODO(rytaft): Consider mapping the region strings to integers and storing - // this as a FastIntSet. + // this as an intsets.Fast. Regions []string } diff --git a/pkg/sql/opt/schema_dependencies.go b/pkg/sql/opt/schema_dependencies.go index aed53509938d..20fa611e4ea1 100644 --- a/pkg/sql/opt/schema_dependencies.go +++ b/pkg/sql/opt/schema_dependencies.go @@ -28,7 +28,7 @@ type SchemaDep struct { // ColumnOrdinals is the set of column ordinals that are referenced for this // table. - ColumnOrdinals intsets.FastIntSet + ColumnOrdinals intsets.Fast // ColumnIDToOrd maps a scopeColumn's ColumnID to its ColumnOrdinal. This // helps us add only the columns that are actually referenced by the object's @@ -44,7 +44,7 @@ type SchemaDep struct { // SchemaTypeDeps contains a set of the IDs of types that // this object depends on. -type SchemaTypeDeps = intsets.FastIntSet +type SchemaTypeDeps = intsets.Fast // GetColumnNames returns a sorted list of the names of the column dependencies // and a boolean to determine if the dependency was a table. 
diff --git a/pkg/sql/opt/testutils/opttester/opt_tester.go b/pkg/sql/opt/testutils/opttester/opt_tester.go index 5153c010d29e..8a85d1db9615 100644 --- a/pkg/sql/opt/testutils/opttester/opt_tester.go +++ b/pkg/sql/opt/testutils/opttester/opt_tester.go @@ -110,7 +110,7 @@ var ( ) // RuleSet efficiently stores an unordered set of RuleNames. -type RuleSet = intsets.FastIntSet +type RuleSet = intsets.Fast // OptTester is a helper for testing the various optimizer components. It // contains the boiler-plate code for the following useful tasks: @@ -210,7 +210,7 @@ type Flags struct { // IgnoreTables specifies the subset of stats tables which should not be // outputted by the stats-quality command. - IgnoreTables intsets.FastIntSet + IgnoreTables intsets.Fast // File specifies the name of the file to import. This field is only used by // the import command. @@ -864,10 +864,10 @@ func fillInLazyProps(e opt.Expr) { rel = rel.FirstExpr() // Derive columns that are candidates for pruning. - norm.DerivePruneCols(rel, intsets.FastIntSet{} /* disabledRules */) + norm.DerivePruneCols(rel, intsets.Fast{} /* disabledRules */) // Derive columns that are candidates for null rejection. - norm.DeriveRejectNullCols(rel, intsets.FastIntSet{} /* disabledRules */) + norm.DeriveRejectNullCols(rel, intsets.Fast{} /* disabledRules */) // Make sure the interesting orderings are calculated. 
ordering.DeriveInterestingOrderings(rel) @@ -1027,7 +1027,7 @@ func (f *Flags) Set(arg datadriven.CmdArg) error { f.Table = arg.Vals[0] case "ignore-tables": - var tables intsets.FastIntSet + var tables intsets.Fast addTables := func(val string) error { table, err := strconv.Atoi(val) if err != nil { diff --git a/pkg/sql/opt/testutils/testcat/create_table.go b/pkg/sql/opt/testutils/testcat/create_table.go index a7efba32a2ca..ec68cf698267 100644 --- a/pkg/sql/opt/testutils/testcat/create_table.go +++ b/pkg/sql/opt/testutils/testcat/create_table.go @@ -812,7 +812,7 @@ func (tt *Table) addIndexWithVersion( } if typ == primaryIndex { - var pkOrdinals intsets.FastIntSet + var pkOrdinals intsets.Fast for _, c := range idx.Columns { pkOrdinals.Add(c.Ordinal()) } diff --git a/pkg/sql/opt/xform/coster.go b/pkg/sql/opt/xform/coster.go index 1babd94ad248..98ba9118f6d1 100644 --- a/pkg/sql/opt/xform/coster.go +++ b/pkg/sql/opt/xform/coster.go @@ -796,7 +796,7 @@ func (c *coster) computeSelectCost(sel *memo.SelectExpr, required *physical.Requ inputRowCount = math.Min(inputRowCount, required.LimitHint/selectivity) } - filterSetup, filterPerRow := c.computeFiltersCost(sel.Filters, intsets.FastIntSet{}) + filterSetup, filterPerRow := c.computeFiltersCost(sel.Filters, intsets.Fast{}) cost := memo.Cost(inputRowCount) * filterPerRow cost += filterSetup return cost @@ -894,7 +894,7 @@ func (c *coster) computeMergeJoinCost(join *memo.MergeJoinExpr) memo.Cost { // smaller right side is preferred to the symmetric join. cost := memo.Cost(0.9*leftRowCount+1.1*rightRowCount) * cpuCostFactor - filterSetup, filterPerRow := c.computeFiltersCost(join.On, intsets.FastIntSet{}) + filterSetup, filterPerRow := c.computeFiltersCost(join.On, intsets.Fast{}) cost += filterSetup // Add the CPU cost of emitting the rows. 
@@ -1017,7 +1017,7 @@ func (c *coster) computeIndexLookupJoinCost( } cost := memo.Cost(lookupCount) * perLookupCost - filterSetup, filterPerRow := c.computeFiltersCost(on, intsets.FastIntSet{}) + filterSetup, filterPerRow := c.computeFiltersCost(on, intsets.Fast{}) cost += filterSetup // Each lookup might retrieve many rows; add the IO cost of retrieving the @@ -1096,7 +1096,7 @@ func (c *coster) computeInvertedJoinCost( perLookupCost *= 5 cost := memo.Cost(lookupCount) * perLookupCost - filterSetup, filterPerRow := c.computeFiltersCost(join.On, intsets.FastIntSet{}) + filterSetup, filterPerRow := c.computeFiltersCost(join.On, intsets.Fast{}) cost += filterSetup // Each lookup might retrieve many rows; add the IO cost of retrieving the @@ -1146,7 +1146,7 @@ func (c *coster) computeExprCost(expr opt.Expr) memo.Cost { // because they do not add to the cost. This can happen when a condition still // exists in the filters even though it is handled by the join. func (c *coster) computeFiltersCost( - filters memo.FiltersExpr, filtersToSkip intsets.FastIntSet, + filters memo.FiltersExpr, filtersToSkip intsets.Fast, ) (setupCost, perRowCost memo.Cost) { // Add a base perRowCost so that callers do not need to have their own // base per-row cost. @@ -1182,7 +1182,7 @@ func (c *coster) computeZigzagJoinCost(join *memo.ZigzagJoinExpr) memo.Cost { scanCost := c.rowScanCost(join.LeftTable, join.LeftIndex, leftCols) scanCost += c.rowScanCost(join.RightTable, join.RightIndex, rightCols) - filterSetup, filterPerRow := c.computeFiltersCost(join.On, intsets.FastIntSet{}) + filterSetup, filterPerRow := c.computeFiltersCost(join.On, intsets.Fast{}) // It is much more expensive to do a seek in zigzag join vs. 
lookup join // because zigzag join starts a new scan for every seek via diff --git a/pkg/sql/opt/xform/explorer.go b/pkg/sql/opt/xform/explorer.go index 0d7630709adb..a41e74dc9af9 100644 --- a/pkg/sql/opt/xform/explorer.go +++ b/pkg/sql/opt/xform/explorer.go @@ -239,7 +239,7 @@ type exploreState struct { // fullyExploredMembers is a set of ordinal positions of members within the // memo group. Once a member expression has been fully explored, its ordinal // is added to this set. - fullyExploredMembers intsets.FastIntSet + fullyExploredMembers intsets.Fast } // isMemberFullyExplored is true if the member at the given ordinal position diff --git a/pkg/sql/opt/xform/join_funcs.go b/pkg/sql/opt/xform/join_funcs.go index cfe2315b7eab..18f4ff56d28b 100644 --- a/pkg/sql/opt/xform/join_funcs.go +++ b/pkg/sql/opt/xform/join_funcs.go @@ -1389,16 +1389,14 @@ func (c *CustomFuncs) GetLocalityOptimizedLookupJoinExprs( // getLocalValues returns the indexes of the values in the given Datums slice // that target local partitions. -func (c *CustomFuncs) getLocalValues( - values tree.Datums, ps partition.PrefixSorter, -) intsets.FastIntSet { +func (c *CustomFuncs) getLocalValues(values tree.Datums, ps partition.PrefixSorter) intsets.Fast { // The PrefixSorter has collected all the prefixes from all the different // partitions (remembering which ones came from local partitions), and has // sorted them so that longer prefixes come before shorter prefixes. For each // span in the scanConstraint, we will iterate through the list of prefixes // until we find a match, so ordering them with longer prefixes first ensures // that the correct match is found. 
- var localVals intsets.FastIntSet + var localVals intsets.Fast for i, val := range values { if match, ok := constraint.FindMatchOnSingleColumn(val, ps); ok { if match.IsLocal { @@ -1413,7 +1411,7 @@ func (c *CustomFuncs) getLocalValues( // by putting the Datums at positions identified by localValOrds into the local // slice, and the remaining Datums into the remote slice. func (c *CustomFuncs) splitValues( - values tree.Datums, localValOrds intsets.FastIntSet, + values tree.Datums, localValOrds intsets.Fast, ) (localVals, remoteVals tree.Datums) { localVals = make(tree.Datums, 0, localValOrds.Len()) remoteVals = make(tree.Datums, 0, len(values)-len(localVals)) diff --git a/pkg/sql/opt/xform/join_order_builder.go b/pkg/sql/opt/xform/join_order_builder.go index 26731d0b5a4d..14a7df387c1a 100644 --- a/pkg/sql/opt/xform/join_order_builder.go +++ b/pkg/sql/opt/xform/join_order_builder.go @@ -1767,7 +1767,7 @@ func getOpIdx(e *edge) int { } } -type edgeSet = intsets.FastIntSet +type edgeSet = intsets.Fast type bitSet uint64 diff --git a/pkg/sql/opt/xform/optimizer.go b/pkg/sql/opt/xform/optimizer.go index d3408022804b..3e475f53faad 100644 --- a/pkg/sql/opt/xform/optimizer.go +++ b/pkg/sql/opt/xform/optimizer.go @@ -40,7 +40,7 @@ type MatchedRuleFunc = norm.MatchedRuleFunc type AppliedRuleFunc = norm.AppliedRuleFunc // RuleSet efficiently stores an unordered set of RuleNames. -type RuleSet = intsets.FastIntSet +type RuleSet = intsets.Fast // Optimizer transforms an input expression tree into the logically equivalent // output expression tree with the lowest possible execution cost. @@ -950,7 +950,7 @@ type groupState struct { // expression in the group that has been fully optimized for the required // properties. These never need to be recosted, no matter how many additional // optimization passes are made. 
- fullyOptimizedExprs intsets.FastIntSet + fullyOptimizedExprs intsets.Fast // explore is used by the explorer to store intermediate state so that // redundant work is minimized. @@ -1004,7 +1004,7 @@ func (a *groupStateAlloc) allocate() *groupState { // disableRulesRandom disables rules with the given probability for testing. func (o *Optimizer) disableRulesRandom(probability float64) { - essentialRules := intsets.MakeFastIntSet( + essentialRules := intsets.MakeFast( // Needed to prevent constraint building from failing. int(opt.NormalizeInConst), // Needed when an index is forced. diff --git a/pkg/sql/opt/xform/scan_funcs.go b/pkg/sql/opt/xform/scan_funcs.go index 4284481b17c4..8680461d5f5b 100644 --- a/pkg/sql/opt/xform/scan_funcs.go +++ b/pkg/sql/opt/xform/scan_funcs.go @@ -360,10 +360,10 @@ func (c *CustomFuncs) buildAllPartitionsConstraint( // target local partitions. func (c *CustomFuncs) getLocalSpans( scanConstraint *constraint.Constraint, ps partition.PrefixSorter, -) intsets.FastIntSet { +) intsets.Fast { // Iterate through the spans and determine whether each one matches // with a prefix from a local partition. - var localSpans intsets.FastIntSet + var localSpans intsets.Fast for i, n := 0, scanConstraint.Spans.Count(); i < n; i++ { span := scanConstraint.Spans.Get(i) if match, ok := constraint.FindMatch(span, ps); ok { @@ -379,7 +379,7 @@ func (c *CustomFuncs) getLocalSpans( // by putting the spans at positions identified by localSpanOrds into the local // constraint, and the remaining spans into the remote constraint. 
func (c *CustomFuncs) splitSpans( - origConstraint *constraint.Constraint, localSpanOrds intsets.FastIntSet, + origConstraint *constraint.Constraint, localSpanOrds intsets.Fast, ) (localConstraint, remoteConstraint constraint.Constraint) { allSpansCount := origConstraint.Spans.Count() localSpansCount := localSpanOrds.Len() diff --git a/pkg/sql/opt/xform/select_funcs.go b/pkg/sql/opt/xform/select_funcs.go index 5a8ce2c871e6..e8b0c4b06735 100644 --- a/pkg/sql/opt/xform/select_funcs.go +++ b/pkg/sql/opt/xform/select_funcs.go @@ -1107,7 +1107,7 @@ func (c *CustomFuncs) GenerateZigzagJoins( iter2.ForEachStartingAfter(leftIndex.Ordinal(), func(rightIndex cat.Index, innerFilters memo.FiltersExpr, rightCols opt.ColSet, _ bool, _ memo.ProjectionsExpr) { // Check if we have zigzag hints. if scanPrivate.Flags.ForceZigzag { - indexes := intsets.MakeFastIntSet(leftIndex.Ordinal(), rightIndex.Ordinal()) + indexes := intsets.MakeFast(leftIndex.Ordinal(), rightIndex.Ordinal()) forceIndexes := scanPrivate.Flags.ZigzagIndexes if !forceIndexes.SubsetOf(indexes) { return diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go index a1c2df7674e9..8fd77022e3ca 100644 --- a/pkg/sql/opt_exec_factory.go +++ b/pkg/sql/opt_exec_factory.go @@ -297,7 +297,7 @@ func constructSimpleProjectForPlanNode( } func hasDuplicates(cols []exec.NodeColumnOrdinal) bool { - var set intsets.FastIntSet + var set intsets.Fast for _, c := range cols { if set.Contains(int(c)) { return true diff --git a/pkg/sql/physicalplan/physical_plan.go b/pkg/sql/physicalplan/physical_plan.go index a2cbb1700eb9..f949d19f1aff 100644 --- a/pkg/sql/physicalplan/physical_plan.go +++ b/pkg/sql/physicalplan/physical_plan.go @@ -1149,7 +1149,7 @@ func (p *PhysicalPlan) EnsureSingleStreamPerNode( forceSerialization bool, post execinfrapb.PostProcessSpec, ) { // Fast path - check if we need to do anything. 
- var nodes intsets.FastIntSet + var nodes intsets.Fast var foundDuplicates bool for _, pIdx := range p.ResultRouters { proc := &p.Processors[pIdx] diff --git a/pkg/sql/row/fetcher.go b/pkg/sql/row/fetcher.go index 00925741ff39..34f38b9f26b5 100644 --- a/pkg/sql/row/fetcher.go +++ b/pkg/sql/row/fetcher.go @@ -99,7 +99,7 @@ type tableInfo struct { // The set of indexes into spec.FetchedColumns that are required for columns // in the value part. - neededValueColsByIdx intsets.FastIntSet + neededValueColsByIdx intsets.Fast // The number of needed columns from the value part of the row. Once we've // seen this number of value columns for a particular row, we can stop diff --git a/pkg/sql/row/helper.go b/pkg/sql/row/helper.go index 3a70f1bdaf0d..7028d367876e 100644 --- a/pkg/sql/row/helper.go +++ b/pkg/sql/row/helper.go @@ -152,7 +152,7 @@ func newRowHelper( func (rh *rowHelper) encodeIndexes( colIDtoRowIndex catalog.TableColMap, values []tree.Datum, - ignoreIndexes intsets.FastIntSet, + ignoreIndexes intsets.Fast, includeEmpty bool, ) ( primaryIndexKey []byte, @@ -203,7 +203,7 @@ func (rh *rowHelper) encodePrimaryIndex( func (rh *rowHelper) encodeSecondaryIndexes( colIDtoRowIndex catalog.TableColMap, values []tree.Datum, - ignoreIndexes intsets.FastIntSet, + ignoreIndexes intsets.Fast, includeEmpty bool, ) (secondaryIndexEntries map[catalog.Index][]rowenc.IndexEntry, err error) { diff --git a/pkg/sql/row/partial_index.go b/pkg/sql/row/partial_index.go index 0e8425121c50..4b7029c51e2b 100644 --- a/pkg/sql/row/partial_index.go +++ b/pkg/sql/row/partial_index.go @@ -24,10 +24,10 @@ import ( // index. type PartialIndexUpdateHelper struct { // IgnoreForPut is a set of index IDs to ignore for Put operations. - IgnoreForPut intsets.FastIntSet + IgnoreForPut intsets.Fast // IgnoreForDel is a set of index IDs to ignore for Del operations. 
- IgnoreForDel intsets.FastIntSet + IgnoreForDel intsets.Fast } // Init initializes a PartialIndexUpdateHelper to track partial index IDs that diff --git a/pkg/sql/row/row_converter.go b/pkg/sql/row/row_converter.go index 83ce6cab8d6c..f8bc70d33b6b 100644 --- a/pkg/sql/row/row_converter.go +++ b/pkg/sql/row/row_converter.go @@ -206,7 +206,7 @@ type DatumRowConverter struct { // Tracks which column indices in the set of visible columns are part of the // user specified target columns. This can be used before populating Datums // to filter out unwanted column data. - TargetColOrds intsets.FastIntSet + TargetColOrds intsets.Fast // The rest of these are derived from tableDesc, just cached here. ri Inserter diff --git a/pkg/sql/rowenc/index_encoding.go b/pkg/sql/rowenc/index_encoding.go index a223b00bc30b..5a345ab43b71 100644 --- a/pkg/sql/rowenc/index_encoding.go +++ b/pkg/sql/rowenc/index_encoding.go @@ -162,7 +162,7 @@ func MakeSpanFromEncDatums( // retrieve neededCols for the specified table and index. The returned descpb.FamilyIDs // are in sorted order. func NeededColumnFamilyIDs( - neededColOrdinals intsets.FastIntSet, table catalog.TableDescriptor, index catalog.Index, + neededColOrdinals intsets.Fast, table catalog.TableDescriptor, index catalog.Index, ) []descpb.FamilyID { if table.NumFamilies() == 1 { return []descpb.FamilyID{table.GetFamilies()[0].ID} @@ -171,9 +171,9 @@ func NeededColumnFamilyIDs( // Build some necessary data structures for column metadata. 
columns := table.DeletableColumns() colIdxMap := catalog.ColumnIDToOrdinalMap(columns) - var indexedCols intsets.FastIntSet - var compositeCols intsets.FastIntSet - var extraCols intsets.FastIntSet + var indexedCols intsets.Fast + var compositeCols intsets.Fast + var extraCols intsets.Fast for i := 0; i < index.NumKeyColumns(); i++ { columnID := index.GetKeyColumnID(i) columnOrdinal := colIdxMap.GetDefault(columnID) diff --git a/pkg/sql/rowexec/aggregator_test.go b/pkg/sql/rowexec/aggregator_test.go index 7c4a0e6269cf..3b278c40755f 100644 --- a/pkg/sql/rowexec/aggregator_test.go +++ b/pkg/sql/rowexec/aggregator_test.go @@ -632,7 +632,7 @@ func makeGroupedIntRows(groupSize, numCols int, groupedCols []int) rowenc.EncDat numRows := intPow(groupSize, len(groupedCols)+1) rows := make(rowenc.EncDatumRows, numRows) - groupColSet := intsets.MakeFastIntSet(groupedCols...) + groupColSet := intsets.MakeFast(groupedCols...) getGroupedColVal := func(rowIdx, colIdx int) int { rank := -1 for i, c := range groupedCols { diff --git a/pkg/sql/rowexec/joinreader_test.go b/pkg/sql/rowexec/joinreader_test.go index c853d1b8909b..604eda2554ff 100644 --- a/pkg/sql/rowexec/joinreader_test.go +++ b/pkg/sql/rowexec/joinreader_test.go @@ -1122,7 +1122,7 @@ func TestJoinReader(t *testing.T) { index := td.ActiveIndexes()[c.indexIdx] var fetchColIDs []descpb.ColumnID - var neededOrds intsets.FastIntSet + var neededOrds intsets.Fast for _, ord := range c.fetchCols { neededOrds.Add(int(ord)) fetchColIDs = append(fetchColIDs, td.PublicColumns()[ord].GetID()) @@ -1588,7 +1588,7 @@ func TestIndexJoiner(t *testing.T) { ); err != nil { t.Fatal(err) } - splitter := span.MakeSplitter(c.desc, c.desc.GetPrimaryIndex(), intsets.MakeFastIntSet(0, 1, 2, 3)) + splitter := span.MakeSplitter(c.desc, c.desc.GetPrimaryIndex(), intsets.MakeFast(0, 1, 2, 3)) spec := execinfrapb.JoinReaderSpec{ FetchSpec: fetchSpec, diff --git a/pkg/sql/rowexec/mergejoiner.go b/pkg/sql/rowexec/mergejoiner.go index 
d2689ae1c81b..3d1d19964e7a 100644 --- a/pkg/sql/rowexec/mergejoiner.go +++ b/pkg/sql/rowexec/mergejoiner.go @@ -40,7 +40,7 @@ type mergeJoiner struct { leftIdx, rightIdx int trackMatchedRight bool emitUnmatchedRight bool - matchedRight intsets.FastIntSet + matchedRight intsets.Fast matchedRightCount int streamMerger streamMerger @@ -246,7 +246,7 @@ func (m *mergeJoiner) nextRow() (rowenc.EncDatumRow, *execinfrapb.ProducerMetada m.emitUnmatchedRight = shouldEmitUnmatchedRow(rightSide, m.joinType) m.leftIdx, m.rightIdx = 0, 0 if m.trackMatchedRight { - m.matchedRight = intsets.FastIntSet{} + m.matchedRight = intsets.Fast{} } } } diff --git a/pkg/sql/rowexec/sample_aggregator.go b/pkg/sql/rowexec/sample_aggregator.go index 0e88b6fb1660..91f6fb530ab1 100644 --- a/pkg/sql/rowexec/sample_aggregator.go +++ b/pkg/sql/rowexec/sample_aggregator.go @@ -135,7 +135,7 @@ func newSampleAggregator( invSketch: make(map[uint32]*sketchInfo, len(spec.InvertedSketches)), } - var sampleCols intsets.FastIntSet + var sampleCols intsets.Fast for i := range spec.Sketches { s.sketches[i] = sketchInfo{ spec: spec.Sketches[i], @@ -157,7 +157,7 @@ func newSampleAggregator( // The datums are converted to their inverted index bytes and sent as a // single DBytes column. We do not use DEncodedKey here because it would // introduce backward compatibility complications. 
- var srCols intsets.FastIntSet + var srCols intsets.Fast srCols.Add(0) sr.Init(int(spec.SampleSize), int(spec.MinSampleSize), bytesRowType, &s.memAcc, srCols) col := spec.InvertedSketches[i].Columns[0] diff --git a/pkg/sql/rowexec/sampler.go b/pkg/sql/rowexec/sampler.go index f88f01f1c5d5..6bfd6d49b093 100644 --- a/pkg/sql/rowexec/sampler.go +++ b/pkg/sql/rowexec/sampler.go @@ -129,7 +129,7 @@ func newSamplerProcessor( } inTypes := input.OutputTypes() - var sampleCols intsets.FastIntSet + var sampleCols intsets.Fast for i := range spec.Sketches { s.sketches[i] = sketchInfo{ spec: spec.Sketches[i], @@ -145,7 +145,7 @@ func newSamplerProcessor( var sr stats.SampleReservoir // The datums are converted to their inverted index bytes and // sent as single DBytes column. - var srCols intsets.FastIntSet + var srCols intsets.Fast srCols.Add(0) sr.Init(int(spec.SampleSize), int(spec.MinSampleSize), bytesRowType, &s.memAcc, srCols) col := spec.InvertedSketches[i].Columns[0] diff --git a/pkg/sql/schemachanger/rel/query.go b/pkg/sql/schemachanger/rel/query.go index 00eefe89d945..3fadf606fc52 100644 --- a/pkg/sql/schemachanger/rel/query.go +++ b/pkg/sql/schemachanger/rel/query.go @@ -127,7 +127,7 @@ func (q *Query) putEvalContext(ec *evalContext) { // Entities returns the entities in the query in their join order. // This method exists primarily for introspection. 
func (q *Query) Entities() []Var { - var entitySlots intsets.FastIntSet + var entitySlots intsets.Fast for _, slotIdx := range q.entities { entitySlots.Add(int(slotIdx)) } diff --git a/pkg/sql/schemachanger/rel/query_data.go b/pkg/sql/schemachanger/rel/query_data.go index 5d56cb22add6..b3ddb3718a69 100644 --- a/pkg/sql/schemachanger/rel/query_data.go +++ b/pkg/sql/schemachanger/rel/query_data.go @@ -129,7 +129,7 @@ func (s *slot) reset() { } func maybeSet( - slots []slot, idx slotIdx, tv typedValue, set *intsets.FastIntSet, + slots []slot, idx slotIdx, tv typedValue, set *intsets.Fast, ) (foundContradiction bool) { s := &slots[idx] diff --git a/pkg/sql/schemachanger/rel/query_eval.go b/pkg/sql/schemachanger/rel/query_eval.go index 154fbac15f82..b5515becbced 100644 --- a/pkg/sql/schemachanger/rel/query_eval.go +++ b/pkg/sql/schemachanger/rel/query_eval.go @@ -155,7 +155,7 @@ func (ec *evalContext) iterateNext() error { func (ec *evalContext) visit(e entity) error { // Keep track of which slots were filled as part of this step in the // evaluation and then unset them when we pop out of this stack frame. - var slotsFilled intsets.FastIntSet + var slotsFilled intsets.Fast defer func() { slotsFilled.ForEach(func(i int) { ec.slots[i].reset() }) }() @@ -312,7 +312,7 @@ func (ec *evalContext) buildWhere() ( } // unify is like unifyReturningContradiction but it does not return the fact. -func unify(facts []fact, s []slot, slotsFilled *intsets.FastIntSet) (contradictionFound bool) { +func unify(facts []fact, s []slot, slotsFilled *intsets.Fast) (contradictionFound bool) { contradictionFound, _ = unifyReturningContradiction(facts, s, slotsFilled) return contradictionFound } @@ -322,7 +322,7 @@ func unify(facts []fact, s []slot, slotsFilled *intsets.FastIntSet) (contradicti // contradiction is returned. Any slots set in the process of unification // are recorded into the set. 
func unifyReturningContradiction( - facts []fact, s []slot, slotsFilled *intsets.FastIntSet, + facts []fact, s []slot, slotsFilled *intsets.Fast, ) (contradictionFound bool, contradicted fact) { // TODO(ajwerner): As we unify we could determine that some facts are no // longer relevant. When we do that we could move them to the front and keep diff --git a/pkg/sql/schemachanger/rel/reltest/database.go b/pkg/sql/schemachanger/rel/reltest/database.go index 6585e33106ed..34c42aac4200 100644 --- a/pkg/sql/schemachanger/rel/reltest/database.go +++ b/pkg/sql/schemachanger/rel/reltest/database.go @@ -98,13 +98,13 @@ func (qc QueryTest) run(t *testing.T, indexes int, db *rel.Database) { results = append(results, cur) return nil }); testutils.IsError(err, `failed to find index to satisfy query`) { - if intsets.MakeFastIntSet(qc.UnsatisfiableIndexes...).Contains(indexes) { + if intsets.MakeFast(qc.UnsatisfiableIndexes...).Contains(indexes) { return } t.Fatalf("expected to succeed with indexes %d: %v", indexes, err) } else if err != nil { t.Fatal(err) - } else if intsets.MakeFastIntSet(qc.UnsatisfiableIndexes...).Contains(indexes) { + } else if intsets.MakeFast(qc.UnsatisfiableIndexes...).Contains(indexes) { t.Fatalf("expected to fail with indexes %d", indexes) } expResults := append(qc.Results[:0:0], qc.Results...) 
diff --git a/pkg/sql/schemachanger/scexec/backfiller/tracker.go b/pkg/sql/schemachanger/scexec/backfiller/tracker.go index dad16e59bf81..ec55669e4a38 100644 --- a/pkg/sql/schemachanger/scexec/backfiller/tracker.go +++ b/pkg/sql/schemachanger/scexec/backfiller/tracker.go @@ -438,7 +438,7 @@ type progressReportFlags struct { } func sameIndexIDSet(ds []descpb.IndexID, ds2 []descpb.IndexID) bool { - toSet := func(ids []descpb.IndexID) (s intsets.FastIntSet) { + toSet := func(ids []descpb.IndexID) (s intsets.Fast) { for _, id := range ids { s.Add(int(id)) } @@ -465,7 +465,7 @@ type mergeKey struct { } func toMergeKey(m scexec.Merge) mergeKey { - var ids intsets.FastIntSet + var ids intsets.Fast for _, id := range m.SourceIndexIDs { ids.Add(int(id)) } diff --git a/pkg/sql/schemachanger/scexec/gc_jobs.go b/pkg/sql/schemachanger/scexec/gc_jobs.go index 196039c4e753..097593adef44 100644 --- a/pkg/sql/schemachanger/scexec/gc_jobs.go +++ b/pkg/sql/schemachanger/scexec/gc_jobs.go @@ -82,7 +82,7 @@ func (gj gcJobs) makeRecords( ) (dbZoneConfigsToRemove catalog.DescriptorIDSet, gcJobRecords []jobs.Record) { type stmts struct { s []scop.StatementForDropJob - set intsets.FastIntSet + set intsets.Fast } addStmt := func(s *stmts, stmt scop.StatementForDropJob) { if id := int(stmt.StatementID); !s.set.Contains(id) { diff --git a/pkg/sql/sem/builtins/geo_builtins.go b/pkg/sql/sem/builtins/geo_builtins.go index 2623a202c558..f3bd40b0dfd0 100644 --- a/pkg/sql/sem/builtins/geo_builtins.go +++ b/pkg/sql/sem/builtins/geo_builtins.go @@ -7425,7 +7425,7 @@ func appendStrArgOverloadForGeometryArgOverloads(def builtinDefinition) builtinD } // Find all argument indexes that have the Geometry type. 
- var argsToCast intsets.FastIntSet + var argsToCast intsets.Fast for i, paramType := range paramTypes { if paramType.Typ.Equal(types.Geometry) { argsToCast.Add(i) diff --git a/pkg/sql/sem/catid/index_id_set.go b/pkg/sql/sem/catid/index_id_set.go index 9b9e3465ae35..9a6330f09e2d 100644 --- a/pkg/sql/sem/catid/index_id_set.go +++ b/pkg/sql/sem/catid/index_id_set.go @@ -14,7 +14,7 @@ import "github.com/cockroachdb/cockroach/pkg/util/intsets" // IndexSet efficiently stores an unordered set of index ids. type IndexSet struct { - set intsets.FastIntSet + set intsets.Fast } // MakeIndexIDSet returns a set initialized with the given values. diff --git a/pkg/sql/sem/tree/constant.go b/pkg/sql/sem/tree/constant.go index d185123d781b..20c44dd9d90e 100644 --- a/pkg/sql/sem/tree/constant.go +++ b/pkg/sql/sem/tree/constant.go @@ -441,7 +441,7 @@ func intersectTypeSlices(xs, ys []*types.T) (out []*types.T) { // The function takes a slice of Exprs and indexes, but expects all the indexed // Exprs to wrap a Constant. The reason it does no take a slice of Constants // instead is to avoid forcing callers to allocate separate slices of Constant. 
-func commonConstantType(vals []Expr, idxs intsets.FastIntSet) (*types.T, bool) { +func commonConstantType(vals []Expr, idxs intsets.Fast) (*types.T, bool) { var candidates []*types.T for i, ok := idxs.Next(0); ok; i, ok = idxs.Next(i + 1) { diff --git a/pkg/sql/sem/tree/overload.go b/pkg/sql/sem/tree/overload.go index 8e9106498d4c..b10082a1916f 100644 --- a/pkg/sql/sem/tree/overload.go +++ b/pkg/sql/sem/tree/overload.go @@ -564,9 +564,9 @@ type overloadTypeChecker struct { overloadIdxs []uint8 // index into overloads exprs []Expr typedExprs []TypedExpr - resolvableIdxs intsets.FastIntSet // index into exprs/typedExprs - constIdxs intsets.FastIntSet // index into exprs/typedExprs - placeholderIdxs intsets.FastIntSet // index into exprs/typedExprs + resolvableIdxs intsets.Fast // index into exprs/typedExprs + constIdxs intsets.Fast // index into exprs/typedExprs + placeholderIdxs intsets.Fast // index into exprs/typedExprs overloadsIdxArr [16]uint8 } @@ -617,9 +617,9 @@ func (s *overloadTypeChecker) release() { } s.typedExprs = s.typedExprs[:0] s.overloadIdxs = s.overloadIdxs[:0] - s.resolvableIdxs = intsets.FastIntSet{} - s.constIdxs = intsets.FastIntSet{} - s.placeholderIdxs = intsets.FastIntSet{} + s.resolvableIdxs = intsets.Fast{} + s.constIdxs = intsets.Fast{} + s.placeholderIdxs = intsets.Fast{} overloadTypeCheckerPool.Put(s) } @@ -720,7 +720,7 @@ func (s *overloadTypeChecker) typeCheckOverloadedExprs( // Filter out overloads on resolved types. This includes resolved placeholders // and any other resolvable exprs. 
- var typeableIdxs = intsets.FastIntSet{} + var typeableIdxs intsets.Fast for i, ok := s.resolvableIdxs.Next(0); ok; i, ok = s.resolvableIdxs.Next(i + 1) { typeableIdxs.Add(i) } diff --git a/pkg/sql/sem/tree/type_check.go b/pkg/sql/sem/tree/type_check.go index 53a65bf9b7ed..9dd5eb35c06a 100644 --- a/pkg/sql/sem/tree/type_check.go +++ b/pkg/sql/sem/tree/type_check.go @@ -1085,7 +1085,7 @@ func (expr *FuncExpr) TypeCheck( return nil, pgerror.Wrapf(err, pgcode.InvalidParameterValue, "%s()", def.Name) } - var calledOnNullInputFns, notCalledOnNullInputFns intsets.FastIntSet + var calledOnNullInputFns, notCalledOnNullInputFns intsets.Fast for _, idx := range s.overloadIdxs { if def.Overloads[idx].CalledOnNullInput { calledOnNullInputFns.Add(int(idx)) @@ -1107,7 +1107,7 @@ func (expr *FuncExpr) TypeCheck( if funcCls == AggregateClass { for i := range s.typedExprs { if s.typedExprs[i].ResolvedType().Family() == types.UnknownFamily { - var filtered intsets.FastIntSet + var filtered intsets.Fast for j, ok := notCalledOnNullInputFns.Next(0); ok; j, ok = notCalledOnNullInputFns.Next(j + 1) { if def.Overloads[j].params().GetAt(i).Equivalent(types.String) { filtered.Add(j) @@ -2356,9 +2356,9 @@ type typeCheckExprsState struct { exprs []Expr typedExprs []TypedExpr - constIdxs intsets.FastIntSet // index into exprs/typedExprs - placeholderIdxs intsets.FastIntSet // index into exprs/typedExprs - resolvableIdxs intsets.FastIntSet // index into exprs/typedExprs + constIdxs intsets.Fast // index into exprs/typedExprs + placeholderIdxs intsets.Fast // index into exprs/typedExprs + resolvableIdxs intsets.Fast // index into exprs/typedExprs } // typeCheckSameTypedExprs type checks a list of expressions, asserting that all @@ -2588,11 +2588,7 @@ func typeCheckConstsAndPlaceholdersWithDesired( // - All other Exprs func typeCheckSplitExprs( exprs []Expr, -) ( - constIdxs intsets.FastIntSet, - placeholderIdxs intsets.FastIntSet, - resolvableIdxs intsets.FastIntSet, -) { +) (constIdxs 
intsets.Fast, placeholderIdxs intsets.Fast, resolvableIdxs intsets.Fast) { for i, expr := range exprs { switch { case isConstant(expr): diff --git a/pkg/sql/span/span_splitter.go b/pkg/sql/span/span_splitter.go index 3adad02531d4..181e6cc0aff3 100644 --- a/pkg/sql/span/span_splitter.go +++ b/pkg/sql/span/span_splitter.go @@ -46,7 +46,7 @@ func NoopSplitter() Splitter { // the NoopSplitter (which never splits). // Note: this splitter should **not** be used for deletes. func MakeSplitter( - table catalog.TableDescriptor, index catalog.Index, neededColOrdinals intsets.FastIntSet, + table catalog.TableDescriptor, index catalog.Index, neededColOrdinals intsets.Fast, ) Splitter { return MakeSplitterForDelete(table, index, neededColOrdinals, false /* forDelete */) } @@ -56,7 +56,7 @@ func MakeSplitter( func MakeSplitterForDelete( table catalog.TableDescriptor, index catalog.Index, - neededColOrdinals intsets.FastIntSet, + neededColOrdinals intsets.Fast, forDelete bool, ) Splitter { // We can only split a span into separate family specific point lookups if: diff --git a/pkg/sql/span/span_splitter_test.go b/pkg/sql/span/span_splitter_test.go index 0a527f047795..9e3248242d2e 100644 --- a/pkg/sql/span/span_splitter_test.go +++ b/pkg/sql/span/span_splitter_test.go @@ -30,7 +30,7 @@ func TestSpanSplitterDoesNotSplitSystemTableFamilySpans(t *testing.T) { splitter := span.MakeSplitter( systemschema.DescriptorTable, systemschema.DescriptorTable.GetPrimaryIndex(), - intsets.MakeFastIntSet(0), + intsets.MakeFast(0), ) if res := splitter.CanSplitSpanIntoFamilySpans(1, false); res { @@ -49,7 +49,7 @@ func TestSpanSplitterCanSplitSpan(t *testing.T) { sql string index string prefixLen int - neededColumns intsets.FastIntSet + neededColumns intsets.Fast containsNull bool canSplit bool }{ @@ -57,35 +57,35 @@ func TestSpanSplitterCanSplitSpan(t *testing.T) { sql: "a INT, b INT, c INT, d INT, PRIMARY KEY (a, b), FAMILY (a, b, c), FAMILY (d)", index: "t_pkey", prefixLen: 2, - neededColumns: 
intsets.MakeFastIntSet(0), + neededColumns: intsets.MakeFast(0), canSplit: true, }, { sql: "a INT, b INT, c INT, d INT, PRIMARY KEY (a, b), FAMILY (a, b, c), FAMILY (d)", index: "t_pkey", prefixLen: 1, - neededColumns: intsets.MakeFastIntSet(0), + neededColumns: intsets.MakeFast(0), canSplit: false, }, { sql: "a INT, b INT, c INT, d INT, PRIMARY KEY (a, b), FAMILY (a, b, c, d)", index: "t_pkey", prefixLen: 2, - neededColumns: intsets.MakeFastIntSet(0), + neededColumns: intsets.MakeFast(0), canSplit: true, }, { sql: "a INT, b INT, c INT, INDEX i (b) STORING (a, c), FAMILY (a), FAMILY (b), FAMILY (c)", index: "i", prefixLen: 1, - neededColumns: intsets.MakeFastIntSet(0), + neededColumns: intsets.MakeFast(0), canSplit: false, }, { sql: "a INT, b INT, c INT, UNIQUE INDEX i (b) STORING (a, c), FAMILY (a), FAMILY (b), FAMILY (c)", index: "i", prefixLen: 1, - neededColumns: intsets.MakeFastIntSet(0), + neededColumns: intsets.MakeFast(0), containsNull: true, canSplit: false, }, @@ -93,7 +93,7 @@ func TestSpanSplitterCanSplitSpan(t *testing.T) { sql: "a INT, b INT, c INT, UNIQUE INDEX i (b) STORING (a, c), FAMILY (a), FAMILY (b), FAMILY (c)", index: "i", prefixLen: 1, - neededColumns: intsets.MakeFastIntSet(0), + neededColumns: intsets.MakeFast(0), containsNull: false, canSplit: true, }, @@ -101,7 +101,7 @@ func TestSpanSplitterCanSplitSpan(t *testing.T) { sql: "a INT, b INT, c INT, UNIQUE INDEX i (b), FAMILY (a), FAMILY (b), FAMILY (c)", index: "i", prefixLen: 1, - neededColumns: intsets.MakeFastIntSet(0), + neededColumns: intsets.MakeFast(0), containsNull: false, canSplit: true, }, @@ -109,7 +109,7 @@ func TestSpanSplitterCanSplitSpan(t *testing.T) { sql: "a INT, b INT, c INT, UNIQUE INDEX i (b), FAMILY (a), FAMILY (b), FAMILY (c)", index: "i", prefixLen: 1, - neededColumns: intsets.MakeFastIntSet(0), + neededColumns: intsets.MakeFast(0), containsNull: true, canSplit: false, }, diff --git a/pkg/sql/sqlstats/insights/registry.go b/pkg/sql/sqlstats/insights/registry.go 
index 11dfbf36ae11..7d09f25be21d 100644 --- a/pkg/sql/sqlstats/insights/registry.go +++ b/pkg/sql/sqlstats/insights/registry.go @@ -73,7 +73,7 @@ func (r *lockingRegistry) ObserveTransaction(sessionID clusterunique.ID, transac delete(r.statements, sessionID) defer statements.release() - var slowStatements intsets.FastIntSet + var slowStatements intsets.Fast for i, s := range *statements { if r.detector.isSlow(s) { slowStatements.Add(i) diff --git a/pkg/sql/stats/row_sampling.go b/pkg/sql/stats/row_sampling.go index 3286b9f4d6bd..b9a6b95936a6 100644 --- a/pkg/sql/stats/row_sampling.go +++ b/pkg/sql/stats/row_sampling.go @@ -61,7 +61,7 @@ type SampleReservoir struct { // sampleCols contains the ordinals of columns that should be sampled from // each row. Note that the sampled rows still contain all columns, but // any columns not part of this set are given a null value. - sampleCols intsets.FastIntSet + sampleCols intsets.Fast } var _ heap.Interface = &SampleReservoir{} @@ -71,7 +71,7 @@ func (sr *SampleReservoir) Init( numSamples, minNumSamples int, colTypes []*types.T, memAcc *mon.BoundAccount, - sampleCols intsets.FastIntSet, + sampleCols intsets.Fast, ) { if minNumSamples < 1 || minNumSamples > numSamples { minNumSamples = numSamples diff --git a/pkg/sql/stats/row_sampling_test.go b/pkg/sql/stats/row_sampling_test.go index a3eaefb77aba..21314e8a3d4a 100644 --- a/pkg/sql/stats/row_sampling_test.go +++ b/pkg/sql/stats/row_sampling_test.go @@ -39,7 +39,7 @@ func runSampleTest( ) { ctx := context.Background() var sr SampleReservoir - sr.Init(numSamples, 1, []*types.T{types.Int}, memAcc, intsets.MakeFastIntSet(0)) + sr.Init(numSamples, 1, []*types.T{types.Int}, memAcc, intsets.MakeFast(0)) for _, r := range ranks { d := rowenc.DatumToEncDatum(types.Int, tree.NewDInt(tree.DInt(r))) prevCapacity := sr.Cap() diff --git a/pkg/sql/stats/util.go b/pkg/sql/stats/util.go index 796ac82a1887..d61b51450d0b 100644 --- a/pkg/sql/stats/util.go +++ b/pkg/sql/stats/util.go @@ -18,7 
+18,7 @@ import ( // MakeSortedColStatKey constructs a unique key representing cols that can be // used as the key in a map, and also sorts cols as a side-effect. func MakeSortedColStatKey(cols []descpb.ColumnID) string { - var colSet intsets.FastIntSet + var colSet intsets.Fast for _, c := range cols { colSet.Add(int(c)) } diff --git a/pkg/sql/stmtdiagnostics/statement_diagnostics.go b/pkg/sql/stmtdiagnostics/statement_diagnostics.go index b2f5396f83bd..fd354a431149 100644 --- a/pkg/sql/stmtdiagnostics/statement_diagnostics.go +++ b/pkg/sql/stmtdiagnostics/statement_diagnostics.go @@ -700,7 +700,7 @@ func (r *Registry) pollRequests(ctx context.Context) error { defer r.mu.Unlock() now := timeutil.Now() - var ids intsets.FastIntSet + var ids intsets.Fast for _, row := range rows { id := RequestID(*row[0].(*tree.DInt)) stmtFingerprint := string(*row[1].(*tree.DString)) diff --git a/pkg/sql/temporary_schema.go b/pkg/sql/temporary_schema.go index 93822847b2f5..1e170f33e5e3 100644 --- a/pkg/sql/temporary_schema.go +++ b/pkg/sql/temporary_schema.go @@ -321,7 +321,7 @@ func cleanupTempSchemaObjects( if err != nil { return err } - dependentColIDs := intsets.MakeFastIntSet() + dependentColIDs := intsets.MakeFast() for _, colID := range d.ColumnIDs { dependentColIDs.Add(int(colID)) } diff --git a/pkg/sql/type_change.go b/pkg/sql/type_change.go index b108c39cb2af..70e7aa2af02c 100644 --- a/pkg/sql/type_change.go +++ b/pkg/sql/type_change.go @@ -1006,7 +1006,7 @@ func findUsagesOfEnumValueInPartitioning( return false, nil } - var colsToCheck intsets.FastIntSet + var colsToCheck intsets.Fast for i, c := range columns[:partitioning.NumColumns()] { typT := c.GetType() if !typT.UserDefined() { @@ -1080,7 +1080,7 @@ func findUsageOfEnumValueInEncodedPartitioningValue( partitioning catalog.Partitioning, v []byte, fakePrefixDatums []tree.Datum, - colsToCheck intsets.FastIntSet, + colsToCheck intsets.Fast, foundUsage bool, member *descpb.TypeDescriptor_EnumMember, ) (bool, error) { 
diff --git a/pkg/upgrade/upgrades/alter_jobs_add_job_type_test.go b/pkg/upgrade/upgrades/alter_jobs_add_job_type_test.go index 18b081fed0ea..4a9bff297d40 100644 --- a/pkg/upgrade/upgrades/alter_jobs_add_job_type_test.go +++ b/pkg/upgrade/upgrades/alter_jobs_add_job_type_test.go @@ -171,7 +171,7 @@ func TestAlterSystemJobsTableAddJobTypeColumn(t *testing.T) { var typStr string rows, err := sqlDB.Query("SELECT distinct(job_type) FROM system.jobs") require.NoError(t, err) - var seenTypes intsets.FastIntSet + var seenTypes intsets.Fast for rows.Next() { err = rows.Scan(&typStr) require.NoError(t, err) diff --git a/pkg/util/intsets/BUILD.bazel b/pkg/util/intsets/BUILD.bazel index b0ed5e9d4445..35fa89641351 100644 --- a/pkg/util/intsets/BUILD.bazel +++ b/pkg/util/intsets/BUILD.bazel @@ -4,10 +4,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "intsets", srcs = [ - "fast_int_set_large.go", - "fast_int_set_small.go", - "fast_int_set_str.go", - "fast_int_set_testonly.go", + "fast_large.go", + "fast_small.go", + "fast_str.go", + "fast_testonly.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/util/intsets", visibility = ["//visibility:public"], @@ -19,7 +19,7 @@ go_library( go_test( name = "intsets_test", - srcs = ["fast_int_set_test.go"], + srcs = ["fast_test.go"], args = ["-test.timeout=295s"], embed = [":intsets"], deps = ["//pkg/util/randutil"], diff --git a/pkg/util/intsets/fast_int_set.go b/pkg/util/intsets/fast.go similarity index 88% rename from pkg/util/intsets/fast_int_set.go rename to pkg/util/intsets/fast.go index da7dccdc28bf..b8685330d978 100644 --- a/pkg/util/intsets/fast_int_set.go +++ b/pkg/util/intsets/fast.go @@ -23,10 +23,10 @@ import ( "golang.org/x/tools/container/intsets" ) -// FastIntSet keeps track of a set of integers. It does not perform any +// Fast keeps track of a set of integers. It does not perform any // allocations when the values are in the range [0, smallCutoff). It is not // thread-safe. 
-type FastIntSet struct { +type Fast struct { // small is a bitmap that stores values in the range [0, smallCutoff). small bitmap // large is only allocated if values are added to the set that are not in @@ -44,11 +44,11 @@ type FastIntSet struct { // | | // ------------------------------------------------------------ // - // FastIntSet stores only values outside the range [0, smallCutoff) in + // Fast stores only values outside the range [0, smallCutoff) in // large. Values less than 0 are stored in large as-is. For values greater // than or equal to smallCutoff, we subtract by smallCutoff before storing // them in large. When they are retrieved from large, we add smallCutoff to - // get the original value. For example, if 300 is added to the FastIntSet, + // get the original value. For example, if 300 is added to the Fast, // it would be added to large as the value (300 - smallCutoff). // // This scheme better utilizes the block with offset=0 compared to an @@ -80,9 +80,9 @@ type bitmap struct { lo, hi uint64 } -// MakeFastIntSet returns a set initialized with the given values. -func MakeFastIntSet(vals ...int) FastIntSet { - var res FastIntSet +// MakeFast returns a set initialized with the given values. +func MakeFast(vals ...int) Fast { + var res Fast for _, v := range vals { res.Add(v) } @@ -91,14 +91,14 @@ func MakeFastIntSet(vals ...int) FastIntSet { // fitsInSmall returns whether all elements in this set are between 0 and // smallCutoff. -func (s *FastIntSet) fitsInSmall() bool { +func (s *Fast) fitsInSmall() bool { return s.large == nil || s.large.IsEmpty() } // Add adds a value to the set. No-op if the value is already in the set. If the // large set is not nil and the value is within the range [0, 63], the value is // added to both the large and small sets. -func (s *FastIntSet) Add(i int) { +func (s *Fast) Add(i int) { if i >= 0 && i < smallCutoff { s.small.Set(i) return @@ -116,9 +116,9 @@ func (s *FastIntSet) Add(i int) { // E.g. 
AddRange(1,5) adds the values 1, 2, 3, 4, 5 to the set. // 'to' must be >= 'from'. // AddRange is always more efficient than individual Adds. -func (s *FastIntSet) AddRange(from, to int) { +func (s *Fast) AddRange(from, to int) { if to < from { - panic("invalid range when adding range to FastIntSet") + panic("invalid range when adding range to Fast") } if s.large == nil && from >= 0 && to < smallCutoff { @@ -131,7 +131,7 @@ func (s *FastIntSet) AddRange(from, to int) { } // Remove removes a value from the set. No-op if the value is not in the set. -func (s *FastIntSet) Remove(i int) { +func (s *Fast) Remove(i int) { if i >= 0 && i < smallCutoff { s.small.Unset(i) return @@ -145,7 +145,7 @@ func (s *FastIntSet) Remove(i int) { } // Contains returns true if the set contains the value. -func (s FastIntSet) Contains(i int) bool { +func (s Fast) Contains(i int) bool { if i >= 0 && i < smallCutoff { return s.small.IsSet(i) } @@ -159,12 +159,12 @@ func (s FastIntSet) Contains(i int) bool { } // Empty returns true if the set is empty. -func (s FastIntSet) Empty() bool { +func (s Fast) Empty() bool { return s.small == bitmap{} && (s.large == nil || s.large.IsEmpty()) } // Len returns the number of the elements in the set. -func (s FastIntSet) Len() int { +func (s Fast) Len() int { l := s.small.OnesCount() if s.large != nil { l += s.large.Len() @@ -174,7 +174,7 @@ func (s FastIntSet) Len() int { // Next returns the first value in the set which is >= startVal. If there is no // value, the second return value is false. -func (s FastIntSet) Next(startVal int) (int, bool) { +func (s Fast) Next(startVal int) (int, bool) { if startVal < 0 && s.large != nil { if res := s.large.LowerBound(startVal); res < 0 { return res, true @@ -202,7 +202,7 @@ func (s FastIntSet) Next(startVal int) (int, bool) { } // ForEach calls a function for each value in the set (in increasing order). 
-func (s FastIntSet) ForEach(f func(i int)) { +func (s Fast) ForEach(f func(i int)) { if !s.fitsInSmall() { for x := s.large.Min(); x < 0; x = s.large.LowerBound(x + 1) { f(x) @@ -226,7 +226,7 @@ func (s FastIntSet) ForEach(f func(i int)) { } // Ordered returns a slice with all the integers in the set, in increasing order. -func (s FastIntSet) Ordered() []int { +func (s Fast) Ordered() []int { if s.Empty() { return nil } @@ -238,8 +238,8 @@ func (s FastIntSet) Ordered() []int { } // Copy returns a copy of s which can be modified independently. -func (s FastIntSet) Copy() FastIntSet { - var c FastIntSet +func (s Fast) Copy() Fast { + var c Fast c.small = s.small if s.large != nil && !s.large.IsEmpty() { c.large = new(intsets.Sparse) @@ -250,7 +250,7 @@ func (s FastIntSet) Copy() FastIntSet { // CopyFrom sets the receiver to a copy of other, which can then be modified // independently. -func (s *FastIntSet) CopyFrom(other FastIntSet) { +func (s *Fast) CopyFrom(other Fast) { s.small = other.small if other.large != nil && !other.large.IsEmpty() { if s.large == nil { @@ -265,7 +265,7 @@ func (s *FastIntSet) CopyFrom(other FastIntSet) { } // UnionWith adds all the elements from rhs to this set. -func (s *FastIntSet) UnionWith(rhs FastIntSet) { +func (s *Fast) UnionWith(rhs Fast) { s.small.UnionWith(rhs.small) if rhs.large == nil || rhs.large.IsEmpty() { // Fast path. @@ -278,14 +278,14 @@ func (s *FastIntSet) UnionWith(rhs FastIntSet) { } // Union returns the union of s and rhs as a new set. -func (s FastIntSet) Union(rhs FastIntSet) FastIntSet { +func (s Fast) Union(rhs Fast) Fast { r := s.Copy() r.UnionWith(rhs) return r } // IntersectionWith removes any elements not in rhs from this set. 
-func (s *FastIntSet) IntersectionWith(rhs FastIntSet) { +func (s *Fast) IntersectionWith(rhs Fast) { s.small.IntersectionWith(rhs.small) if rhs.large == nil { s.large = nil @@ -298,14 +298,14 @@ func (s *FastIntSet) IntersectionWith(rhs FastIntSet) { } // Intersection returns the intersection of s and rhs as a new set. -func (s FastIntSet) Intersection(rhs FastIntSet) FastIntSet { +func (s Fast) Intersection(rhs Fast) Fast { r := s.Copy() r.IntersectionWith(rhs) return r } // Intersects returns true if s has any elements in common with rhs. -func (s FastIntSet) Intersects(rhs FastIntSet) bool { +func (s Fast) Intersects(rhs Fast) bool { if s.small.Intersects(rhs.small) { return true } @@ -316,7 +316,7 @@ func (s FastIntSet) Intersects(rhs FastIntSet) bool { } // DifferenceWith removes any elements in rhs from this set. -func (s *FastIntSet) DifferenceWith(rhs FastIntSet) { +func (s *Fast) DifferenceWith(rhs Fast) { s.small.DifferenceWith(rhs.small) if s.large == nil || rhs.large == nil { // Fast path @@ -326,14 +326,14 @@ func (s *FastIntSet) DifferenceWith(rhs FastIntSet) { } // Difference returns the elements of s that are not in rhs as a new set. -func (s FastIntSet) Difference(rhs FastIntSet) FastIntSet { +func (s Fast) Difference(rhs Fast) Fast { r := s.Copy() r.DifferenceWith(rhs) return r } // Equals returns true if the two sets are identical. -func (s FastIntSet) Equals(rhs FastIntSet) bool { +func (s Fast) Equals(rhs Fast) bool { if s.small != rhs.small { return false } @@ -347,7 +347,7 @@ func (s FastIntSet) Equals(rhs FastIntSet) bool { } // SubsetOf returns true if rhs contains all the elements in s. -func (s FastIntSet) SubsetOf(rhs FastIntSet) bool { +func (s Fast) SubsetOf(rhs Fast) bool { if s.fitsInSmall() { return s.small.SubsetOf(rhs.small) } @@ -368,7 +368,7 @@ func (s FastIntSet) SubsetOf(rhs FastIntSet) bool { // // WARNING: this is used by plan gists, so if this encoding changes, // explain.gistVersion needs to be bumped. 
-func (s *FastIntSet) Encode(buf *bytes.Buffer) error { +func (s *Fast) Encode(buf *bytes.Buffer) error { if s.large != nil && s.large.Min() < 0 { return errors.AssertionFailedf("Encode used with negative elements") } @@ -395,12 +395,12 @@ func (s *FastIntSet) Encode(buf *bytes.Buffer) error { // Decode does the opposite of Encode. The contents of the receiver are // overwritten. -func (s *FastIntSet) Decode(br io.ByteReader) error { +func (s *Fast) Decode(br io.ByteReader) error { length, err := binary.ReadUvarint(br) if err != nil { return err } - *s = FastIntSet{} + *s = Fast{} if length == 0 { // Special case: a 64-bit bitmap is encoded directly. @@ -413,7 +413,7 @@ func (s *FastIntSet) Decode(br io.ByteReader) error { for i := 0; i < int(length); i++ { elem, err := binary.ReadUvarint(br) if err != nil { - *s = FastIntSet{} + *s = Fast{} return err } s.Add(int(elem)) diff --git a/pkg/util/intsets/fast_int_set_large.go b/pkg/util/intsets/fast_large.go similarity index 100% rename from pkg/util/intsets/fast_int_set_large.go rename to pkg/util/intsets/fast_large.go diff --git a/pkg/util/intsets/fast_int_set_small.go b/pkg/util/intsets/fast_small.go similarity index 100% rename from pkg/util/intsets/fast_int_set_small.go rename to pkg/util/intsets/fast_small.go diff --git a/pkg/util/intsets/fast_int_set_str.go b/pkg/util/intsets/fast_str.go similarity index 97% rename from pkg/util/intsets/fast_int_set_str.go rename to pkg/util/intsets/fast_str.go index d47ca1bab35e..479061ad8ce7 100644 --- a/pkg/util/intsets/fast_int_set_str.go +++ b/pkg/util/intsets/fast_str.go @@ -18,7 +18,7 @@ import ( // String returns a list representation of elements. Sequential runs of positive // numbers are shown as ranges. For example, for the set {0, 1, 2, 5, 6, 10}, // the output is "(0-2,5,6,10)". 
-func (s FastIntSet) String() string { +func (s Fast) String() string { var buf bytes.Buffer buf.WriteByte('(') appendRange := func(start, end int) { diff --git a/pkg/util/intsets/fast_int_set_test.go b/pkg/util/intsets/fast_test.go similarity index 93% rename from pkg/util/intsets/fast_int_set_test.go rename to pkg/util/intsets/fast_test.go index eb9743496056..ffaab8f5d0ac 100644 --- a/pkg/util/intsets/fast_int_set_test.go +++ b/pkg/util/intsets/fast_test.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -func TestFastIntSet(t *testing.T) { +func TestFast(t *testing.T) { for _, minVal := range []int{-10, -1, 0, 8, smallCutoff, 2 * smallCutoff} { for _, maxVal := range []int{-1, 1, 8, 30, smallCutoff, 2 * smallCutoff, 4 * smallCutoff} { if maxVal <= minVal { @@ -35,7 +35,7 @@ func TestFastIntSet(t *testing.T) { in := make(map[int]bool) forEachRes := make(map[int]bool) - var s FastIntSet + var s Fast for i := 0; i < 1000; i++ { v := minVal + rng.Intn(maxVal-minVal) if rng.Intn(2) == 0 { @@ -77,7 +77,7 @@ func TestFastIntSet(t *testing.T) { if o := s.Ordered(); !reflect.DeepEqual(vals, o) { t.Fatalf("set built with Next doesn't match Ordered: %v vs %v", vals, o) } - assertSame := func(orig, copy FastIntSet) { + assertSame := func(orig, copy Fast) { t.Helper() if !orig.Equals(copy) || !copy.Equals(orig) { t.Fatalf("expected equality: %v, %v", orig, copy) @@ -97,7 +97,7 @@ func TestFastIntSet(t *testing.T) { s2 := s.Copy() assertSame(s, s2) // Test CopyFrom. - var s3 FastIntSet + var s3 Fast s3.CopyFrom(s) assertSame(s, s3) // Make sure CopyFrom into a non-empty set still works. @@ -112,13 +112,13 @@ func TestFastIntSet(t *testing.T) { t.Fatalf("error during Encode: %v", err) } encoded := buf.String() - var s2 FastIntSet + var s2 Fast if err := s2.Decode(bytes.NewReader([]byte(encoded))); err != nil { t.Fatalf("error during Decode: %v", err) } assertSame(s, s2) // Verify that decoding into a non-empty set still works. 
- var s3 FastIntSet + var s3 Fast s3.Add(minVal + rng.Intn(maxVal-minVal)) if err := s3.Decode(bytes.NewReader([]byte(encoded))); err != nil { t.Fatalf("error during Decode: %v", err) @@ -131,12 +131,12 @@ func TestFastIntSet(t *testing.T) { } } -func TestFastIntSetTwoSetOps(t *testing.T) { +func TestFastTwoSetOps(t *testing.T) { rng, _ := randutil.NewTestRand() // genSet creates a set of numElem values in [minVal, minVal + valRange) // It also adds and then removes numRemoved elements. - genSet := func(numElem, numRemoved, minVal, valRange int) (FastIntSet, map[int]bool) { - var s FastIntSet + genSet := func(numElem, numRemoved, minVal, valRange int) (Fast, map[int]bool) { + var s Fast vals := rng.Perm(valRange)[:numElem+numRemoved] used := make(map[int]bool, len(vals)) for _, i := range vals { @@ -266,8 +266,8 @@ func TestFastIntSetTwoSetOps(t *testing.T) { } } -func TestFastIntSetAddRange(t *testing.T) { - assertSet := func(set *FastIntSet, from, to int) { +func TestFastAddRange(t *testing.T) { + assertSet := func(set *Fast, from, to int) { t.Helper() // Iterate through the set and ensure that the values // it contain are the values from 'from' to 'to' (inclusively). @@ -275,10 +275,10 @@ func TestFastIntSetAddRange(t *testing.T) { set.ForEach(func(actual int) { t.Helper() if actual > to { - t.Fatalf("expected last value in FastIntSet to be %d, got %d", to, actual) + t.Fatalf("expected last value in Fast to be %d, got %d", to, actual) } if expected != actual { - t.Fatalf("expected next value in FastIntSet to be %d, got %d", expected, actual) + t.Fatalf("expected next value in Fast to be %d, got %d", expected, actual) } expected++ }) @@ -289,14 +289,14 @@ func TestFastIntSetAddRange(t *testing.T) { // [-5, smallCutoff + 20]. 
for from := -5; from <= max; from++ { for to := from; to <= max; to++ { - var set FastIntSet + var set Fast set.AddRange(from, to) assertSet(&set, from, to) } } } -func TestFastIntSetString(t *testing.T) { +func TestFastString(t *testing.T) { testCases := []struct { vals []int exp string @@ -320,7 +320,7 @@ func TestFastIntSetString(t *testing.T) { } for i, tc := range testCases { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - s := MakeFastIntSet(tc.vals...) + s := MakeFast(tc.vals...) if str := s.String(); str != tc.exp { t.Errorf("expected %s, got %s", tc.exp, str) } diff --git a/pkg/util/intsets/fast_int_set_testonly.go b/pkg/util/intsets/fast_testonly.go similarity index 79% rename from pkg/util/intsets/fast_int_set_testonly.go rename to pkg/util/intsets/fast_testonly.go index 645d12259cb4..4b388b29db36 100644 --- a/pkg/util/intsets/fast_int_set_testonly.go +++ b/pkg/util/intsets/fast_testonly.go @@ -11,7 +11,7 @@ //go:build fast_int_set_small || fast_int_set_large // +build fast_int_set_small fast_int_set_large -// This file implements two variants of FastIntSet used for testing which always +// This file implements two variants of Fast used for testing which always // behaves like in either the "small" or "large" case (depending on // fastIntSetAlwaysSmall). Tests that exhibit a difference when using one of // these variants indicates a bug. @@ -27,35 +27,35 @@ import ( "golang.org/x/tools/container/intsets" ) -// FastIntSet keeps track of a set of integers. It does not perform any +// Fast keeps track of a set of integers. It does not perform any // allocations when the values are small. It is not thread-safe. -type FastIntSet struct { +type Fast struct { // Used to keep the size of the struct the same. _ struct{ lo, hi uint64 } s *intsets.Sparse } -// MakeFastIntSet returns a set initialized with the given values. -func MakeFastIntSet(vals ...int) FastIntSet { - var res FastIntSet +// MakeFast returns a set initialized with the given values. 
+func MakeFast(vals ...int) Fast { + var res Fast for _, v := range vals { res.Add(v) } return res } -func (s *FastIntSet) prepareForMutation() { +func (s *Fast) prepareForMutation() { if s.s == nil { s.s = &intsets.Sparse{} } else if fastIntSetAlwaysSmall { // We always make a full copy to prevent any aliasing; this simulates the - // semantics of the "small" regime of FastIntSet. + // semantics of the "small" regime of Fast. *s = s.Copy() } } // Add adds a value to the set. No-op if the value is already in the set. -func (s *FastIntSet) Add(i int) { +func (s *Fast) Add(i int) { s.prepareForMutation() s.s.Insert(i) } @@ -64,7 +64,7 @@ func (s *FastIntSet) Add(i int) { // E.g. AddRange(1,5) adds the values 1, 2, 3, 4, 5 to the set. // 'to' must be >= 'from'. // AddRange is always more efficient than individual Adds. -func (s *FastIntSet) AddRange(from, to int) { +func (s *Fast) AddRange(from, to int) { s.prepareForMutation() for i := from; i <= to; i++ { s.s.Insert(i) @@ -72,23 +72,23 @@ func (s *FastIntSet) AddRange(from, to int) { } // Remove removes a value from the set. No-op if the value is not in the set. -func (s *FastIntSet) Remove(i int) { +func (s *Fast) Remove(i int) { s.prepareForMutation() s.s.Remove(i) } // Contains returns true if the set contains the value. -func (s FastIntSet) Contains(i int) bool { +func (s Fast) Contains(i int) bool { return s.s != nil && s.s.Has(i) } // Empty returns true if the set is empty. -func (s FastIntSet) Empty() bool { +func (s Fast) Empty() bool { return s.s == nil || s.s.IsEmpty() } // Len returns the number of the elements in the set. -func (s FastIntSet) Len() int { +func (s Fast) Len() int { if s.s == nil { return 0 } @@ -97,7 +97,7 @@ func (s FastIntSet) Len() int { // Next returns the first value in the set which is >= startVal. If there is no // value, the second return value is false. 
-func (s FastIntSet) Next(startVal int) (int, bool) { +func (s Fast) Next(startVal int) (int, bool) { if s.s == nil { return intsets.MaxInt, false } @@ -106,7 +106,7 @@ func (s FastIntSet) Next(startVal int) (int, bool) { } // ForEach calls a function for each value in the set (in increasing order). -func (s FastIntSet) ForEach(f func(i int)) { +func (s Fast) ForEach(f func(i int)) { if s.s == nil { return } @@ -116,7 +116,7 @@ func (s FastIntSet) ForEach(f func(i int)) { } // Ordered returns a slice with all the integers in the set, in increasing order. -func (s FastIntSet) Ordered() []int { +func (s Fast) Ordered() []int { if s.Empty() { return nil } @@ -124,22 +124,22 @@ func (s FastIntSet) Ordered() []int { } // Copy returns a copy of s which can be modified independently. -func (s FastIntSet) Copy() FastIntSet { +func (s Fast) Copy() Fast { n := &intsets.Sparse{} if s.s != nil { n.Copy(s.s) } - return FastIntSet{s: n} + return Fast{s: n} } // CopyFrom sets the receiver to a copy of other, which can then be modified // independently. -func (s *FastIntSet) CopyFrom(other FastIntSet) { +func (s *Fast) CopyFrom(other Fast) { *s = other.Copy() } // UnionWith adds all the elements from rhs to this set. -func (s *FastIntSet) UnionWith(rhs FastIntSet) { +func (s *Fast) UnionWith(rhs Fast) { if rhs.s == nil { return } @@ -148,16 +148,16 @@ func (s *FastIntSet) UnionWith(rhs FastIntSet) { } // Union returns the union of s and rhs as a new set. -func (s FastIntSet) Union(rhs FastIntSet) FastIntSet { +func (s Fast) Union(rhs Fast) Fast { r := s.Copy() r.UnionWith(rhs) return r } // IntersectionWith removes any elements not in rhs from this set. -func (s *FastIntSet) IntersectionWith(rhs FastIntSet) { +func (s *Fast) IntersectionWith(rhs Fast) { if rhs.s == nil { - *s = FastIntSet{} + *s = Fast{} return } s.prepareForMutation() @@ -165,14 +165,14 @@ func (s *FastIntSet) IntersectionWith(rhs FastIntSet) { } // Intersection returns the intersection of s and rhs as a new set. 
-func (s FastIntSet) Intersection(rhs FastIntSet) FastIntSet { +func (s Fast) Intersection(rhs Fast) Fast { r := s.Copy() r.IntersectionWith(rhs) return r } // Intersects returns true if s has any elements in common with rhs. -func (s FastIntSet) Intersects(rhs FastIntSet) bool { +func (s Fast) Intersects(rhs Fast) bool { if s.s == nil || rhs.s == nil { return false } @@ -180,7 +180,7 @@ func (s FastIntSet) Intersects(rhs FastIntSet) bool { } // DifferenceWith removes any elements in rhs from this set. -func (s *FastIntSet) DifferenceWith(rhs FastIntSet) { +func (s *Fast) DifferenceWith(rhs Fast) { if rhs.s == nil { return } @@ -189,14 +189,14 @@ func (s *FastIntSet) DifferenceWith(rhs FastIntSet) { } // Difference returns the elements of s that are not in rhs as a new set. -func (s FastIntSet) Difference(rhs FastIntSet) FastIntSet { +func (s Fast) Difference(rhs Fast) Fast { r := s.Copy() r.DifferenceWith(rhs) return r } // Equals returns true if the two sets are identical. -func (s FastIntSet) Equals(rhs FastIntSet) bool { +func (s Fast) Equals(rhs Fast) bool { if s.Empty() || rhs.Empty() { return s.Empty() == rhs.Empty() } @@ -204,7 +204,7 @@ func (s FastIntSet) Equals(rhs FastIntSet) bool { } // SubsetOf returns true if rhs contains all the elements in s. -func (s FastIntSet) SubsetOf(rhs FastIntSet) bool { +func (s Fast) SubsetOf(rhs Fast) bool { if s.Empty() { return true } @@ -216,12 +216,12 @@ func (s FastIntSet) SubsetOf(rhs FastIntSet) bool { // Shift generates a new set which contains elements i+delta for elements i in // the original set. 
-func (s *FastIntSet) Shift(delta int) FastIntSet { +func (s *Fast) Shift(delta int) Fast { n := &intsets.Sparse{} s.ForEach(func(i int) { n.Insert(i + delta) }) - return FastIntSet{s: n} + return Fast{s: n} } // Encode the set and write it to a bytes.Buffer using binary.varint byte @@ -234,7 +234,7 @@ func (s *FastIntSet) Shift(delta int) FastIntSet { // // WARNING: this is used by plan gists, so if this encoding changes, // explain.gistVersion needs to be bumped. -func (s *FastIntSet) Encode(buf *bytes.Buffer) error { +func (s *Fast) Encode(buf *bytes.Buffer) error { if s.s != nil && s.s.Min() < 0 { return errors.AssertionFailedf("Encode used with negative elements") } @@ -265,7 +265,7 @@ func (s *FastIntSet) Encode(buf *bytes.Buffer) error { // Decode does the opposite of Encode. The contents of the receiver are // overwritten. -func (s *FastIntSet) Decode(br io.ByteReader) error { +func (s *Fast) Decode(br io.ByteReader) error { length, err := binary.ReadUvarint(br) if err != nil { return err @@ -289,7 +289,7 @@ func (s *FastIntSet) Decode(br io.ByteReader) error { for i := 0; i < int(length); i++ { elem, err := binary.ReadUvarint(br) if err != nil { - *s = FastIntSet{} + *s = Fast{} return err } s.Add(int(elem)) diff --git a/pkg/util/json/json.go b/pkg/util/json/json.go index 150431e6f2c3..d2ba389e32df 100644 --- a/pkg/util/json/json.go +++ b/pkg/util/json/json.go @@ -404,7 +404,7 @@ func (b *ObjectBuilder) Build() JSON { type FixedKeysObjectBuilder struct { pairs []jsonKeyValuePair keyOrd map[string]int - updated intsets.FastIntSet + updated intsets.Fast } // NewFixedKeysObjectBuilder creates JSON object builder for the specified @@ -448,7 +448,7 @@ func (b *FixedKeysObjectBuilder) Build() (JSON, error) { "expected all %d keys to be updated, %d updated", len(b.pairs), b.updated.Len()) } - b.updated = intsets.FastIntSet{} + b.updated = intsets.Fast{} // Must copy b.pairs in case builder is reused. 
return jsonObject(append([]jsonKeyValuePair(nil), b.pairs...)), nil } From 9031550dbfe2b3fcf66d6176b1e196be9d3ea36b Mon Sep 17 00:00:00 2001 From: Marcus Gartner Date: Fri, 14 Oct 2022 13:18:44 -0400 Subject: [PATCH 3/7] intsets: move bitmap into a separate file Release note: None --- pkg/util/intsets/BUILD.bazel | 1 + pkg/util/intsets/bitmap.go | 105 +++++++++++++++++++++++++++++++++++ pkg/util/intsets/fast.go | 92 ------------------------------ 3 files changed, 106 insertions(+), 92 deletions(-) create mode 100644 pkg/util/intsets/bitmap.go diff --git a/pkg/util/intsets/BUILD.bazel b/pkg/util/intsets/BUILD.bazel index 35fa89641351..46253b942043 100644 --- a/pkg/util/intsets/BUILD.bazel +++ b/pkg/util/intsets/BUILD.bazel @@ -4,6 +4,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "intsets", srcs = [ + "bitmap.go", "fast_large.go", "fast_small.go", "fast_str.go", diff --git a/pkg/util/intsets/bitmap.go b/pkg/util/intsets/bitmap.go new file mode 100644 index 000000000000..0cd12f450a5b --- /dev/null +++ b/pkg/util/intsets/bitmap.go @@ -0,0 +1,105 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package intsets + +import "math/bits" + +// smallCutoff is the size of the small bitmap. +// Note: this can be set to a smaller value, e.g. for testing. +const smallCutoff = 128 + +// bitmap implements a bitmap of size smallCutoff. +type bitmap struct { + // We don't use an array because that makes Go always keep the struct on the + // stack (see https://github.com/golang/go/issues/24416). 
+ lo, hi uint64 +} + +func (v bitmap) IsSet(i int) bool { + w := v.lo + if i >= 64 { + w = v.hi + } + return w&(1<= 64: + v.hi |= mask(from&63, to&63) + default: + v.lo |= mask(from, 63) + v.hi |= mask(0, to&63) + } +} + +func (v *bitmap) UnionWith(other bitmap) { + v.lo |= other.lo + v.hi |= other.hi +} + +func (v *bitmap) IntersectionWith(other bitmap) { + v.lo &= other.lo + v.hi &= other.hi +} + +func (v bitmap) Intersects(other bitmap) bool { + return ((v.lo & other.lo) | (v.hi & other.hi)) != 0 +} + +func (v *bitmap) DifferenceWith(other bitmap) { + v.lo &^= other.lo + v.hi &^= other.hi +} + +func (v bitmap) SubsetOf(other bitmap) bool { + return (v.lo&other.lo == v.lo) && (v.hi&other.hi == v.hi) +} + +func (v bitmap) OnesCount() int { + return bits.OnesCount64(v.lo) + bits.OnesCount64(v.hi) +} + +func (v bitmap) Next(startVal int) (nextVal int, ok bool) { + if startVal < 64 { + if ntz := bits.TrailingZeros64(v.lo >> uint64(startVal)); ntz < 64 { + // Found next element in the low word. + return startVal + ntz, true + } + startVal = 64 + } + // Check high word. + if ntz := bits.TrailingZeros64(v.hi >> uint64(startVal&63)); ntz < 64 { + return startVal + ntz, true + } + return -1, false +} diff --git a/pkg/util/intsets/fast.go b/pkg/util/intsets/fast.go index b8685330d978..348fba46e52d 100644 --- a/pkg/util/intsets/fast.go +++ b/pkg/util/intsets/fast.go @@ -62,10 +62,6 @@ type Fast struct { large *intsets.Sparse } -// smallCutoff is the size of the small bitmap. -// Note: this can be set to a smaller value, e.g. for testing. -const smallCutoff = 128 - const ( // MaxInt is the maximum integer that a set can contain. MaxInt = intsets.MaxInt @@ -73,13 +69,6 @@ const ( MinInt = intsets.MinInt ) -// bitmap implements a bitmap of size smallCutoff. -type bitmap struct { - // We don't use an array because that makes Go always keep the struct on the - // stack (see https://github.com/golang/go/issues/24416). 
- lo, hi uint64 -} - // MakeFast returns a set initialized with the given values. func MakeFast(vals ...int) Fast { var res Fast @@ -421,84 +410,3 @@ func (s *Fast) Decode(br io.ByteReader) error { } return nil } - -func (v bitmap) IsSet(i int) bool { - w := v.lo - if i >= 64 { - w = v.hi - } - return w&(1<= 64: - v.hi |= mask(from&63, to&63) - default: - v.lo |= mask(from, 63) - v.hi |= mask(0, to&63) - } -} - -func (v *bitmap) UnionWith(other bitmap) { - v.lo |= other.lo - v.hi |= other.hi -} - -func (v *bitmap) IntersectionWith(other bitmap) { - v.lo &= other.lo - v.hi &= other.hi -} - -func (v bitmap) Intersects(other bitmap) bool { - return ((v.lo & other.lo) | (v.hi & other.hi)) != 0 -} - -func (v *bitmap) DifferenceWith(other bitmap) { - v.lo &^= other.lo - v.hi &^= other.hi -} - -func (v bitmap) SubsetOf(other bitmap) bool { - return (v.lo&other.lo == v.lo) && (v.hi&other.hi == v.hi) -} - -func (v bitmap) OnesCount() int { - return bits.OnesCount64(v.lo) + bits.OnesCount64(v.hi) -} - -func (v bitmap) Next(startVal int) (nextVal int, ok bool) { - if startVal < 64 { - if ntz := bits.TrailingZeros64(v.lo >> uint64(startVal)); ntz < 64 { - // Found next element in the low word. - return startVal + ntz, true - } - startVal = 64 - } - // Check high word. - if ntz := bits.TrailingZeros64(v.hi >> uint64(startVal&63)); ntz < 64 { - return startVal + ntz, true - } - return -1, false -} From 7b92c4eb05957592cc33c5f43c70cb317500247c Mon Sep 17 00:00:00 2001 From: Pavel Kalinnikov Date: Tue, 20 Dec 2022 13:34:15 +0000 Subject: [PATCH 4/7] roachtest: query only primary index in SHOW RANGES The semantics of SHOW RANGES has changed in 5604feaf, it now includes ranges of the secondary indices. This change fixes the query to only request ranges of the primary index, to support the original assertion.
Fixes #93703 Fixes #93708 Epic: none --- pkg/cmd/roachtest/tests/copy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cmd/roachtest/tests/copy.go b/pkg/cmd/roachtest/tests/copy.go index 7b21fd066406..ba2fff9df50a 100644 --- a/pkg/cmd/roachtest/tests/copy.go +++ b/pkg/cmd/roachtest/tests/copy.go @@ -71,7 +71,7 @@ func registerCopy(r registry.Registry) { rangeCount := func() int { var count int - const q = "SELECT count(*) FROM [SHOW RANGES FROM TABLE bank.bank]" + const q = "SELECT count(*) FROM [SHOW RANGES FROM INDEX bank.bank@primary]" if err := db.QueryRow(q).Scan(&count); err != nil { t.Fatalf("failed to get range count: %v", err) } From d63e1e14fc3870bafbf4aa999c594db112cef760 Mon Sep 17 00:00:00 2001 From: Marcus Gartner Date: Fri, 14 Oct 2022 14:35:34 -0400 Subject: [PATCH 5/7] intsets: add Sparse and use it in Fast This commit replaces usages of the Sparse type in golang.org/x/tools/container/intsets with a new `intsets.Sparse` type. The new type is inspired by the `x/tools` type, but differs in several ways: 1. The new `Sparse` type provides a smaller API than the `x/tools` `Sparse` type, only containing the methods required by `intsets.Fast`. 2. The new `Sparse` type is implemented as a singly-linked list of blocks rather than a circular, doubly-linked list. 3. The new `Sparse` type reuses the `bitmap` type used in `intsets.Fast`. As a result, each block can store up to 128 integers instead of 256. This simpler implementation yields a performance boost in query optimization of some types of queries. 
Release note: None --- pkg/testutils/lint/lint_test.go | 1 + pkg/upgrade/upgrades/BUILD.bazel | 1 + pkg/util/BUILD.bazel | 3 +- pkg/util/fast_int_map.go | 2 +- pkg/util/intsets/BUILD.bazel | 16 +- pkg/util/intsets/fast.go | 68 ++---- pkg/util/intsets/fast_testonly.go | 15 +- pkg/util/intsets/oracle.go | 139 +++++++++++ pkg/util/intsets/sparse.go | 391 ++++++++++++++++++++++++++++++ pkg/util/intsets/sparse_test.go | 175 +++++++++++++ 10 files changed, 743 insertions(+), 68 deletions(-) create mode 100644 pkg/util/intsets/oracle.go create mode 100644 pkg/util/intsets/sparse.go create mode 100644 pkg/util/intsets/sparse_test.go diff --git a/pkg/testutils/lint/lint_test.go b/pkg/testutils/lint/lint_test.go index b657e74a833a..80f489bfe0d1 100644 --- a/pkg/testutils/lint/lint_test.go +++ b/pkg/testutils/lint/lint_test.go @@ -2023,6 +2023,7 @@ func TestLint(t *testing.T) { "../../storage/enginepb", "../../util", "../../util/hlc", + "../../util/intsets", } // Ensure that all packages that have '//gcassert' or '// gcassert' diff --git a/pkg/upgrade/upgrades/BUILD.bazel b/pkg/upgrade/upgrades/BUILD.bazel index 7def17f1eaec..9950483d7d8b 100644 --- a/pkg/upgrade/upgrades/BUILD.bazel +++ b/pkg/upgrade/upgrades/BUILD.bazel @@ -146,6 +146,7 @@ go_test( "//pkg/util", "//pkg/util/ctxgroup", "//pkg/util/hlc", + "//pkg/util/intsets", "//pkg/util/leaktest", "//pkg/util/log", "//pkg/util/protoutil", diff --git a/pkg/util/BUILD.bazel b/pkg/util/BUILD.bazel index 915af7ce85ad..4677c91a56cf 100644 --- a/pkg/util/BUILD.bazel +++ b/pkg/util/BUILD.bazel @@ -11,7 +11,6 @@ go_library( "constants_metamorphic_enable.go", "every_n.go", "fast_int_map.go", - "fast_int_set.go", # keep "hash.go", "nocopy.go", "pluralize.go", @@ -32,12 +31,12 @@ go_library( deps = [ "//pkg/util/buildutil", "//pkg/util/envutil", + "//pkg/util/intsets", "//pkg/util/netutil/addr", "//pkg/util/randutil", "//pkg/util/syncutil", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", - 
"@org_golang_x_tools//container/intsets", ], ) diff --git a/pkg/util/fast_int_map.go b/pkg/util/fast_int_map.go index 924e305b9046..9a032d36c4b9 100644 --- a/pkg/util/fast_int_map.go +++ b/pkg/util/fast_int_map.go @@ -16,7 +16,7 @@ import ( "math/bits" "sort" - "golang.org/x/tools/container/intsets" + "github.com/cockroachdb/cockroach/pkg/util/intsets" ) // FastIntMap is a replacement for map[int]int which is more efficient when both diff --git a/pkg/util/intsets/BUILD.bazel b/pkg/util/intsets/BUILD.bazel index 46253b942043..6f01c77f16f3 100644 --- a/pkg/util/intsets/BUILD.bazel +++ b/pkg/util/intsets/BUILD.bazel @@ -5,23 +5,27 @@ go_library( name = "intsets", srcs = [ "bitmap.go", + "fast.go", # keep "fast_large.go", "fast_small.go", "fast_str.go", "fast_testonly.go", + "oracle.go", + "sparse.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/util/intsets", visibility = ["//visibility:public"], - deps = [ - "@com_github_cockroachdb_errors//:errors", - "@org_golang_x_tools//container/intsets", - ], + deps = ["@com_github_cockroachdb_errors//:errors"], ) go_test( name = "intsets_test", - srcs = ["fast_test.go"], - args = ["-test.timeout=295s"], + size = "small", + srcs = [ + "fast_test.go", + "sparse_test.go", + ], + args = ["-test.timeout=55s"], embed = [":intsets"], deps = ["//pkg/util/randutil"], ) diff --git a/pkg/util/intsets/fast.go b/pkg/util/intsets/fast.go index 348fba46e52d..ab662e069c03 100644 --- a/pkg/util/intsets/fast.go +++ b/pkg/util/intsets/fast.go @@ -20,7 +20,6 @@ import ( "math/bits" "github.com/cockroachdb/errors" - "golang.org/x/tools/container/intsets" ) // Fast keeps track of a set of integers. It does not perform any @@ -31,44 +30,9 @@ type Fast struct { small bitmap // large is only allocated if values are added to the set that are not in // the range [0, smallCutoff). - // - // The implementation of intsets.Sparse is a circular, doubly-linked list of - // blocks. Each block contains a 256-bit bitmap and an offset. 
A block with - // offset=n contains a value n+i if the i-th bit of the bitmap is set. Block - // offsets are always divisible by 256. - // - // For example, here is a diagram of the set {0, 1, 256, 257, 512}, where - // each block is denoted by {offset, bitmap}: - // - // ---> {0, ..011} <----> {256, ..011} <----> {512, ..001} <--- - // | | - // ------------------------------------------------------------ - // - // Fast stores only values outside the range [0, smallCutoff) in - // large. Values less than 0 are stored in large as-is. For values greater - // than or equal to smallCutoff, we subtract by smallCutoff before storing - // them in large. When they are retrieved from large, we add smallCutoff to - // get the original value. For example, if 300 is added to the Fast, - // it would be added to large as the value (300 - smallCutoff). - // - // This scheme better utilizes the block with offset=0 compared to an - // alternative implementation where small and large contain overlapping - // values. For example, consider the set {0, 200, 300}. In the overlapping - // implementation, two blocks would be allocated: a block with offset=0 - // would store 0 and 200, and a block with offset=256 would store 300. By - // omitting values in the range [0, smallCutoff) in large, only one block is - // allocated: a block with offset=0 that stores 200-smallCutoff and - // 300-smallCutoff. - large *intsets.Sparse + large *Sparse } -const ( - // MaxInt is the maximum integer that a set can contain. - MaxInt = intsets.MaxInt - // MinInt is the maximum integer that a set can contain. - MinInt = intsets.MinInt -) - // MakeFast returns a set initialized with the given values. func MakeFast(vals ...int) Fast { var res Fast @@ -80,8 +44,10 @@ func MakeFast(vals ...int) Fast { // fitsInSmall returns whether all elements in this set are between 0 and // smallCutoff. 
+// +//gcassert:inline func (s *Fast) fitsInSmall() bool { - return s.large == nil || s.large.IsEmpty() + return s.large == nil || s.large.Empty() } // Add adds a value to the set. No-op if the value is already in the set. If the @@ -93,12 +59,12 @@ func (s *Fast) Add(i int) { return } if s.large == nil { - s.large = new(intsets.Sparse) + s.large = new(Sparse) } if i >= smallCutoff { i -= smallCutoff } - s.large.Insert(i) + s.large.Add(i) } // AddRange adds values 'from' up to 'to' (inclusively) to the set. @@ -142,14 +108,14 @@ func (s Fast) Contains(i int) bool { if i >= smallCutoff { i -= smallCutoff } - return s.large.Has(i) + return s.large.Contains(i) } return false } // Empty returns true if the set is empty. func (s Fast) Empty() bool { - return s.small == bitmap{} && (s.large == nil || s.large.IsEmpty()) + return s.small == bitmap{} && (s.large == nil || s.large.Empty()) } // Len returns the number of the elements in the set. @@ -185,9 +151,9 @@ func (s Fast) Next(startVal int) (int, bool) { startVal = 0 } res := s.large.LowerBound(startVal) - return res + smallCutoff, res != intsets.MaxInt + return res + smallCutoff, res != MaxInt } - return intsets.MaxInt, false + return MaxInt, false } // ForEach calls a function for each value in the set (in increasing order). @@ -208,7 +174,7 @@ func (s Fast) ForEach(f func(i int)) { v &^= 1 << uint(i) } if !s.fitsInSmall() { - for x := s.large.LowerBound(0); x != intsets.MaxInt; x = s.large.LowerBound(x + 1) { + for x := s.large.LowerBound(0); x != MaxInt; x = s.large.LowerBound(x + 1) { f(x + smallCutoff) } } @@ -230,8 +196,8 @@ func (s Fast) Ordered() []int { func (s Fast) Copy() Fast { var c Fast c.small = s.small - if s.large != nil && !s.large.IsEmpty() { - c.large = new(intsets.Sparse) + if s.large != nil && !s.large.Empty() { + c.large = new(Sparse) c.large.Copy(s.large) } return c @@ -241,9 +207,9 @@ func (s Fast) Copy() Fast { // independently. 
func (s *Fast) CopyFrom(other Fast) { s.small = other.small - if other.large != nil && !other.large.IsEmpty() { + if other.large != nil && !other.large.Empty() { if s.large == nil { - s.large = new(intsets.Sparse) + s.large = new(Sparse) } s.large.Copy(other.large) } else { @@ -256,12 +222,12 @@ func (s *Fast) CopyFrom(other Fast) { // UnionWith adds all the elements from rhs to this set. func (s *Fast) UnionWith(rhs Fast) { s.small.UnionWith(rhs.small) - if rhs.large == nil || rhs.large.IsEmpty() { + if rhs.large == nil || rhs.large.Empty() { // Fast path. return } if s.large == nil { - s.large = new(intsets.Sparse) + s.large = new(Sparse) } s.large.UnionWith(rhs.large) } diff --git a/pkg/util/intsets/fast_testonly.go b/pkg/util/intsets/fast_testonly.go index 4b388b29db36..0ba78c3ef040 100644 --- a/pkg/util/intsets/fast_testonly.go +++ b/pkg/util/intsets/fast_testonly.go @@ -24,7 +24,6 @@ import ( "io" "github.com/cockroachdb/errors" - "golang.org/x/tools/container/intsets" ) // Fast keeps track of a set of integers. It does not perform any @@ -32,7 +31,7 @@ import ( type Fast struct { // Used to keep the size of the struct the same. _ struct{ lo, hi uint64 } - s *intsets.Sparse + s *Sparse } // MakeFast returns a set initialized with the given values. @@ -46,7 +45,7 @@ func MakeFast(vals ...int) Fast { func (s *Fast) prepareForMutation() { if s.s == nil { - s.s = &intsets.Sparse{} + s.s = &Sparse{} } else if fastIntSetAlwaysSmall { // We always make a full copy to prevent any aliasing; this simulates the // semantics of the "small" regime of Fast. @@ -99,10 +98,10 @@ func (s Fast) Len() int { // value, the second return value is false. func (s Fast) Next(startVal int) (int, bool) { if s.s == nil { - return intsets.MaxInt, false + return MaxInt, false } res := s.s.LowerBound(startVal) - return res, res != intsets.MaxInt + return res, res != MaxInt } // ForEach calls a function for each value in the set (in increasing order). 
@@ -110,7 +109,7 @@ func (s Fast) ForEach(f func(i int)) { if s.s == nil { return } - for x := s.s.Min(); x != intsets.MaxInt; x = s.s.LowerBound(x + 1) { + for x := s.s.Min(); x != MaxInt; x = s.s.LowerBound(x + 1) { f(x) } } @@ -125,7 +124,7 @@ func (s Fast) Ordered() []int { // Copy returns a copy of s which can be modified independently. func (s Fast) Copy() Fast { - n := &intsets.Sparse{} + n := &Sparse{} if s.s != nil { n.Copy(s.s) } @@ -217,7 +216,7 @@ func (s Fast) SubsetOf(rhs Fast) bool { // Shift generates a new set which contains elements i+delta for elements i in // the original set. func (s *Fast) Shift(delta int) Fast { - n := &intsets.Sparse{} + n := &Sparse{} s.ForEach(func(i int) { n.Insert(i + delta) }) diff --git a/pkg/util/intsets/oracle.go b/pkg/util/intsets/oracle.go new file mode 100644 index 000000000000..62fdb4566749 --- /dev/null +++ b/pkg/util/intsets/oracle.go @@ -0,0 +1,139 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package intsets + +// oracle implements the same API as Sparse using a map. It is only used for +// testing. +type oracle struct { + m map[int]struct{} +} + +// Clear empties the set. +func (o *oracle) Clear() { + o.m = nil +} + +// Add adds an integer to the set. +func (o *oracle) Add(i int) { + if o.m == nil { + o.m = make(map[int]struct{}) + } + o.m[i] = struct{}{} +} + +// Remove removes an integer from the set. +func (o *oracle) Remove(i int) { + delete(o.m, i) +} + +// Contains returns true if the set contains the given integer. +func (o oracle) Contains(i int) bool { + _, ok := o.m[i] + return ok +} + +// Empty returns true if the set contains no integers. 
+func (o oracle) Empty() bool { + return len(o.m) == 0 +} + +// Len returns the number of integers in the set. +func (o oracle) Len() int { + return len(o.m) +} + +// LowerBound returns the smallest element >= startVal, or MaxInt if there is no +// such element. +func (o *oracle) LowerBound(startVal int) int { + lb := MaxInt + for i := range o.m { + if i >= startVal && i < lb { + lb = i + } + } + return lb +} + +// Min returns the minimum value in the set. If the set is empty, MaxInt is +// returned. +func (o *oracle) Min() int { + return o.LowerBound(MinInt) +} + +// Copy sets the receiver to a copy of rhs, which can then be modified +// independently. +func (o *oracle) Copy(rhs *oracle) { + o.Clear() + for i := range rhs.m { + o.Add(i) + } +} + +// UnionWith adds all the elements from rhs to this set. +func (o *oracle) UnionWith(rhs *oracle) { + for i := range rhs.m { + o.Add(i) + } +} + +// IntersectionWith removes any elements not in rhs from this set. +func (o *oracle) IntersectionWith(rhs *oracle) { + for i := range o.m { + if !rhs.Contains(i) { + o.Remove(i) + } + } +} + +// Intersects returns true if s has any elements in common with rhs. +func (o *oracle) Intersects(rhs *oracle) bool { + for i := range o.m { + if rhs.Contains(i) { + return true + } + } + return false +} + +// DifferenceWith removes any elements in rhs from this set. +func (o *oracle) DifferenceWith(rhs *oracle) { + for i := range rhs.m { + o.Remove(i) + } +} + +// Equals returns true if the two sets are identical. +func (o *oracle) Equals(rhs *oracle) bool { + if len(o.m) != len(rhs.m) { + return false + } + for i := range o.m { + if !rhs.Contains(i) { + return false + } + } + for i := range rhs.m { + if !o.Contains(i) { + return false + } + } + return true +} + +// SubsetOf returns true if rhs contains all the elements in s. 
+func (o *oracle) SubsetOf(rhs *oracle) bool { + for i := range o.m { + if !rhs.Contains(i) { + return false + } + } + return true +} diff --git a/pkg/util/intsets/sparse.go b/pkg/util/intsets/sparse.go new file mode 100644 index 000000000000..d0280f7e4186 --- /dev/null +++ b/pkg/util/intsets/sparse.go @@ -0,0 +1,391 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package intsets + +// Sparse is a set of integers. It is not thread-safe. It must be copied with +// the Copy method. +// +// Sparse is implemented as a linked list of blocks, each containing an offset +// and a bitmap. A block with offset=o contains an integer o+b if the b-th bit +// of the bitmap is set. Block offsets are always divisible by smallCutoff. +// +// For example, here is a diagram of the set {0, 1, 128, 129, 512}, where +// each block is denoted by {offset, bitmap}: +// +// {0, ..011} ---> {128, ..011} ---> {512, ..001} +// +// Sparse is inspired by golang.org/x/tools/container/intsets. Sparse implements +// a smaller API, providing only the methods required by Fast. The omission of a +// Max method allows us to use a singly-linked list here instead of a +// circular, doubly-linked list. +type Sparse struct { + root block +} + +// block is a node in a singly-linked list with an offset and a bitmap. A block +// with offset=o contains an integer o+b if the b-th bit of the bitmap is set. +type block struct { + offset int + bits bitmap + next *block +} + +const ( + // MaxInt is the maximum integer that can be stored in a set. + MaxInt = int(^uint(0) >> 1) + // MinInt is the minimum integer that can be stored in a set. 
+ MinInt = -MaxInt - 1 + + smallCutoffMask = smallCutoff - 1 +) + +func init() { + if smallCutoff == 0 || (smallCutoff&smallCutoffMask) != 0 { + panic("smallCutoff must be a power of two; see offset and bit") + } +} + +// offset returns the block offset for the given integer. +// Note: Bitwise AND NOT only works here because smallCutoff is a power of two. +// +//gcassert:inline +func offset(i int) int { + return i &^ smallCutoffMask +} + +// bit returns the bit within a block that should be set for the given integer. +// Note: Bitwise AND only works here because smallCutoff is a power of two. +// +//gcassert:inline +func bit(i int) int { + return i & smallCutoffMask +} + +// empty returns true if the block is empty, i.e., none of its bits have been +// set. +// +//gcassert:inline +func (s block) empty() bool { + return s.bits == bitmap{} +} + +// insertBlock inserts a block after prev and returns it. If prev is nil, a +// block is inserted at the front of the list. +func (s *Sparse) insertBlock(prev *block) *block { + if s.Empty() { + return &s.root + } + if prev == nil { + // Insert a new block at the front of the list. + second := s.root + s.root = block{} + s.root.next = &second + return &s.root + } + // Insert a new block in the middle of the list. + n := block{} + n.next = prev.next + prev.next = &n + return &n +} + +// removeBlock removes a block from the list. prev must be the block before b. +func (s *Sparse) removeBlock(prev, b *block) *block { + if prev == nil { + if b.next == nil { + s.root = block{} + return nil + } + s.root.offset = b.next.offset + s.root.bits = b.next.bits + s.root.next = b.next.next + return &s.root + } + prev.next = prev.next.next + return prev.next +} + +// Clear empties the set. +func (s *Sparse) Clear() { + s.root = block{} +} + +// Add adds an integer to the set. 
+func (s *Sparse) Add(i int) { + o := offset(i) + b := bit(i) + var last *block + for sb := &s.root; sb != nil && sb.offset <= o; sb = sb.next { + if sb.offset == o { + sb.bits.Set(b) + return + } + last = sb + } + n := s.insertBlock(last) + n.offset = o + n.bits.Set(b) +} + +// Remove removes an integer from the set. +func (s *Sparse) Remove(i int) { + o := offset(i) + b := bit(i) + var last *block + for sb := &s.root; sb != nil && sb.offset <= o; sb = sb.next { + if sb.offset == o { + sb.bits.Unset(b) + if sb.empty() { + s.removeBlock(last, sb) + } + return + } + last = sb + } +} + +// Contains returns true if the set contains the given integer. +func (s Sparse) Contains(i int) bool { + o := offset(i) + b := bit(i) + for sb := &s.root; sb != nil && sb.offset <= o; sb = sb.next { + if sb.offset == o { + return sb.bits.IsSet(b) + } + } + return false +} + +// Empty returns true if the set contains no integers. +func (s Sparse) Empty() bool { + return s.root.empty() +} + +// Len returns the number of integers in the set. +func (s Sparse) Len() int { + l := 0 + for sb := &s.root; sb != nil; sb = sb.next { + l += sb.bits.OnesCount() + } + return l +} + +// LowerBound returns the smallest element >= startVal, or MaxInt if there is no +// such element. +func (s *Sparse) LowerBound(startVal int) int { + if s.Empty() { + return MaxInt + } + o := offset(startVal) + b := bit(startVal) + for sb := &s.root; sb != nil; sb = sb.next { + if sb.offset > o { + v, _ := sb.bits.Next(0) + return v + sb.offset + } + if sb.offset == o { + if v, ok := sb.bits.Next(b); ok { + return v + sb.offset + } + } + } + return MaxInt +} + +// Min returns the minimum value in the set. If the set is empty, MaxInt is +// returned. +func (s *Sparse) Min() int { + if s.Empty() { + return MaxInt + } + b := s.root + v, _ := b.bits.Next(0) + return v + b.offset +} + +// Copy sets the receiver to a copy of rhs, which can then be modified +// independently. 
+func (s *Sparse) Copy(rhs *Sparse) { + var last *block + sb := &s.root + rb := &rhs.root + for rb != nil { + if sb == nil { + sb = s.insertBlock(last) + } + sb.offset = rb.offset + sb.bits = rb.bits + last = sb + sb = sb.next + rb = rb.next + } + if last != nil { + last.next = nil + } +} + +// UnionWith adds all the elements from rhs to this set. +func (s *Sparse) UnionWith(rhs *Sparse) { + if rhs.Empty() { + return + } + + var last *block + sb := &s.root + rb := &rhs.root + for rb != nil { + if sb != nil && sb.offset == rb.offset { + sb.bits.UnionWith(rb.bits) + rb = rb.next + } else if sb == nil || sb.offset > rb.offset { + sb = s.insertBlock(last) + sb.offset = rb.offset + sb.bits = rb.bits + rb = rb.next + } + last = sb + sb = sb.next + } +} + +// IntersectionWith removes any elements not in rhs from this set. +func (s *Sparse) IntersectionWith(rhs *Sparse) { + var last *block + sb := &s.root + rb := &rhs.root + for sb != nil && rb != nil { + switch { + case sb.offset > rb.offset: + rb = rb.next + case sb.offset < rb.offset: + sb = s.removeBlock(last, sb) + default: + sb.bits.IntersectionWith(rb.bits) + if !sb.empty() { + // If sb is not empty, then advance sb and last. + // + // If sb is empty, we advance neither sb nor last so that the + // empty sb will be removed in the next iteration of the loop + // (the sb.offset < rb.offset case), or after the loop (see the + // comment below). + last = sb + sb = sb.next + } + rb = rb.next + } + } + if sb == &s.root { + // This is a special case that only happens when all the following are + // true: + // + // 1. Either s or rhs has a single block. + // 2. The first blocks of s and rhs have matching offsets. + // 3. The intersection of the first blocks of s and rhs yields an + // empty block. + // + // In this case, the root block would not have been removed in the loop, + // and it may have a non-zero offset and a non-nil next block, so we + // clear it here. 
+ s.root = block{} + } + if last != nil { + // At this point, last is a pointer to the last block in s that we've + // intersected with a block in rhs. If there are no remaining blocks in + // s, then last.next will be nil. If there are no remaining blocks in + // rhs, then we must remove any blocks after last. Unconditionally + // clearing last.next works in both cases. + last.next = nil + } +} + +// Intersects returns true if s has any elements in common with rhs. +func (s *Sparse) Intersects(rhs *Sparse) bool { + sb := &s.root + rb := &rhs.root + for sb != nil && rb != nil { + switch { + case sb.offset > rb.offset: + rb = rb.next + case sb.offset < rb.offset: + sb = sb.next + default: + if sb.bits.Intersects(rb.bits) { + return true + } + sb = sb.next + rb = rb.next + } + } + return false +} + +// DifferenceWith removes any elements in rhs from this set. +func (s *Sparse) DifferenceWith(rhs *Sparse) { + var last *block + sb := &s.root + rb := &rhs.root + for sb != nil && rb != nil { + switch { + case sb.offset > rb.offset: + rb = rb.next + case sb.offset < rb.offset: + last = sb + sb = sb.next + default: + sb.bits.DifferenceWith(rb.bits) + if sb.empty() { + sb = s.removeBlock(last, sb) + } else { + last = sb + sb = sb.next + } + rb = rb.next + } + } +} + +// Equals returns true if the two sets are identical. +func (s *Sparse) Equals(rhs *Sparse) bool { + sb := &s.root + rb := &rhs.root + for sb != nil && rb != nil { + if sb.offset != rb.offset || sb.bits != rb.bits { + return false + } + sb = sb.next + rb = rb.next + } + return sb == nil && rb == nil +} + +// SubsetOf returns true if rhs contains all the elements in s. 
+func (s *Sparse) SubsetOf(rhs *Sparse) bool { + if s.Empty() { + return true + } + sb := &s.root + rb := &rhs.root + for sb != nil && rb != nil { + if sb.offset > rb.offset { + rb = rb.next + continue + } + if sb.offset < rb.offset { + return false + } + if !sb.bits.SubsetOf(rb.bits) { + return false + } + sb = sb.next + rb = rb.next + } + return sb == nil +} diff --git a/pkg/util/intsets/sparse_test.go b/pkg/util/intsets/sparse_test.go new file mode 100644 index 000000000000..f934c0e2a3ce --- /dev/null +++ b/pkg/util/intsets/sparse_test.go @@ -0,0 +1,175 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package intsets + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/cockroachdb/cockroach/pkg/util/randutil" +) + +const ( + // The number of operations to perform for each test configuration. + numOps = 10000 + // The max size of sets returned by randomSets(). + maxRandSetSize = 20 +) + +func TestSparse(t *testing.T) { + for _, minVal := range []int{-100_000, -smallCutoff * 10, -smallCutoff, 0, smallCutoff, smallCutoff * 10} { + for _, maxVal := range []int{1, smallCutoff, smallCutoff * 10, 100_000} { + if maxVal <= minVal { + continue + } + // We are using Parallel, we need to make local instances of the + // loop variables. + minVal := minVal + maxVal := maxVal + t.Run(fmt.Sprintf("%d_%d", minVal, maxVal), func(t *testing.T) { + t.Parallel() // SAFE FOR TESTING (this comment is for the linter) + rng, _ := randutil.NewTestRand() + o := new(oracle) + s := new(Sparse) + + for i := 0; i < numOps; i++ { + v := minVal + rng.Intn(maxVal-minVal) + switch rng.Intn(4) { + case 0: + // Add operation. 
+ o.Add(v)
+ s.Add(v)
+ case 1:
+ // Remove operation.
+ o.Remove(v)
+ s.Remove(v)
+ case 2:
+ // Copy operation.
+ oCopy := new(oracle)
+ oCopy.Copy(o)
+ o = oCopy
+ sCopy := new(Sparse)
+ sCopy.Copy(s)
+ s = sCopy
+ case 3:
+ // Clear operation infrequently.
+ if rng.Intn(20) == 0 {
+ o.Clear()
+ s.Clear()
+ }
+ }
+ validateSparseSet(t, o, s)
+ }
+ })
+ }
+ }
+}
+
+func TestSparseSetOps(t *testing.T) {
+ for _, minVal := range []int{-100_000, -smallCutoff * 10, -smallCutoff, 0, smallCutoff, smallCutoff * 10} {
+ for _, maxVal := range []int{1, smallCutoff, smallCutoff * 10, 100_000} {
+ if maxVal <= minVal {
+ continue
+ }
+ // We are using Parallel, we need to make local instances of the
+ // loop variables.
+ minVal := minVal
+ maxVal := maxVal
+ t.Run(fmt.Sprintf("%d_%d", minVal, maxVal), func(t *testing.T) {
+ t.Parallel() // SAFE FOR TESTING (this comment is for the linter)
+ rng, _ := randutil.NewTestRand()
+ o, s := randomSets(rng, minVal, maxVal)
+
+ for i := 0; i < numOps; i++ {
+ oo, so := randomSets(rng, minVal, maxVal)
+ // Test boolean methods.
+ if o.Intersects(oo) != s.Intersects(so) {
+ t.Fatal("expected sparse sets to intersect")
+ }
+ if o.Equals(oo) != s.Equals(so) {
+ t.Fatal("expected sparse sets to be equal")
+ }
+ if o.SubsetOf(oo) != s.SubsetOf(so) {
+ t.Fatal("expected sparse set to be a subset")
+ }
+
+ // Perform a set operation.
+ switch rng.Intn(4) {
+ case 0:
+ // Copy operation.
+ o.Copy(oo)
+ s.Copy(so)
+ case 1:
+ // UnionWith operation.
+ o.UnionWith(oo)
+ s.UnionWith(so)
+ case 2:
+ // IntersectionWith operation.
+ o.IntersectionWith(oo)
+ s.IntersectionWith(so)
+ case 3:
+ // DifferenceWith operation.
+ o.DifferenceWith(oo)
+ s.DifferenceWith(so)
+ }
+ validateSparseSet(t, o, s)
+ }
+ })
+ }
+ }
+}
+
+func validateSparseSet(t *testing.T, o *oracle, s *Sparse) {
+ if !s.Equals(s) {
+ t.Fatal("expected sparse set to equal itself")
+ }
+ if !s.Empty() && !s.Intersects(s) {
+ t.Fatal("expected non-empty sparse set to intersect with itself")
+ }
+ if !s.SubsetOf(s) {
+ t.Fatal("expected sparse set to be subset of itself")
+ }
+ if o.Len() != s.Len() {
+ t.Fatalf("expected sparse set to have len %d, found %d", o.Len(), s.Len())
+ }
+ if o.Empty() != s.Empty() {
+ neg := ""
+ if !o.Empty() {
+ neg = "not "
+ }
+ t.Fatalf("expected sparse set to %sbe empty", neg)
+ }
+ if o.Min() != s.Min() {
+ t.Fatalf("expected sparse set to have minimum of %d, found %d", o.Min(), s.Min())
+ }
+ for i := o.LowerBound(MinInt); i < MaxInt; i = o.LowerBound(i + 1) {
+ if !s.Contains(i) {
+ t.Fatalf("expected sparse set to contain %d", i)
+ }
+ }
+ for i := s.LowerBound(MinInt); i < MaxInt; i = s.LowerBound(i + 1) {
+ if !o.Contains(i) {
+ t.Fatalf("expected sparse set to not contain %d", i)
+ }
+ }
+}
+
+func randomSets(rng *rand.Rand, minVal, maxVal int) (*oracle, *Sparse) {
+ o := new(oracle)
+ s := new(Sparse)
+ for i, n := 0, rng.Intn(maxRandSetSize); i < n; i++ {
+ v := minVal + rng.Intn(maxVal-minVal)
+ o.Add(v)
+ s.Add(v)
+ }
+ return o, s
+}
From c760db73c0e3ce8b8062b7466dc8f1ca0aa6565f Mon Sep 17 00:00:00 2001 From: Marcus Gartner Date: Fri, 14 Oct 2022 14:39:23 -0400 Subject: [PATCH 6/7] intsets: do not shift values in Fast.large Previously, `intsets.Fast` shifted values by 128 when storing them in `Fast.large` to minimize allocations within `Fast.large` (see the deleted comments in the diff). Now that `intsets.Sparse` uses blocks with 128-bit bitmaps instead of 256-bit bitmaps, this shift no longer provides any benefit, so it has been removed.
Release note: None --- pkg/util/intsets/fast.go | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/pkg/util/intsets/fast.go b/pkg/util/intsets/fast.go index ab662e069c03..6081642dd8d9 100644 --- a/pkg/util/intsets/fast.go +++ b/pkg/util/intsets/fast.go @@ -61,9 +61,6 @@ func (s *Fast) Add(i int) { if s.large == nil { s.large = new(Sparse) } - if i >= smallCutoff { - i -= smallCutoff - } s.large.Add(i) } @@ -92,9 +89,6 @@ func (s *Fast) Remove(i int) { return } if s.large != nil { - if i >= smallCutoff { - i -= smallCutoff - } s.large.Remove(i) } } @@ -105,9 +99,6 @@ func (s Fast) Contains(i int) bool { return s.small.IsSet(i) } if s.large != nil { - if i >= smallCutoff { - i -= smallCutoff - } return s.large.Contains(i) } return false @@ -145,13 +136,8 @@ func (s Fast) Next(startVal int) (int, bool) { } } if s.large != nil { - startVal -= smallCutoff - if startVal < 0 { - // We already searched for negative values in large above. - startVal = 0 - } res := s.large.LowerBound(startVal) - return res + smallCutoff, res != MaxInt + return res, res != MaxInt } return MaxInt, false } @@ -175,7 +161,7 @@ func (s Fast) ForEach(f func(i int)) { } if !s.fitsInSmall() { for x := s.large.LowerBound(0); x != MaxInt; x = s.large.LowerBound(x + 1) { - f(x + smallCutoff) + f(x) } } } From f20c0b778265503a25fb7f0f3e64fbfa6ca2d216 Mon Sep 17 00:00:00 2001 From: Jayant Shrivastava Date: Tue, 20 Dec 2022 09:49:47 -0500 Subject: [PATCH 7/7] sqlsmith: add crdb_internal.job_payload_type to blocklist Previously, we would test the `crdb_internal.job_payload_type` builtin function with random values. Since this function unmarshals a jobspb.Payload, calling it with random bytes will always produce an error. We do not need to test this function under the smither. 
Epic: none Fixes: #93843 Release note: None --- pkg/internal/sqlsmith/schema.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/internal/sqlsmith/schema.go b/pkg/internal/sqlsmith/schema.go index 50a932d7362f..cc36d689fcab 100644 --- a/pkg/internal/sqlsmith/schema.go +++ b/pkg/internal/sqlsmith/schema.go @@ -513,6 +513,9 @@ var functions = func() map[tree.FunctionClass]map[oid.Oid][]function { "crdb_internal.revalidate_unique_constraint", "crdb_internal.request_statement_bundle", "crdb_internal.set_compaction_concurrency", + // crdb_internal.job_payload_type unmarshals a jobspb.Payload from + // raw bytes. Calling it with random values will produce an error. + "crdb_internal.job_payload_type", } { skip = skip || strings.Contains(def.Name, substr) }