From 42f7f08a30207ebb97e63437255554ae519f7a25 Mon Sep 17 00:00:00 2001 From: Nathan VanBenschoten Date: Mon, 29 Mar 2021 18:51:50 -0400 Subject: [PATCH 1/5] sql: gate global_reads zone attribute on cluster version and enterprise license Fixes #61039. This commit gates the use of the global_reads zone configuration attribute on the cluster version and on the use of an enterprise license. The first half of this is necessary for correctness. The second half is not, because follower reads won't be served from global read followers without an enterprise license, but a check when the zone configs are set improves UX. --- .../logic_test/multi_region_mixed_20.2_21.1 | 18 +++ pkg/ccl/logictestccl/testdata/logic_test/zone | 29 ++++ pkg/ccl/multiregionccl/multiregion_test.go | 43 ++++++ .../logictest/testdata/logic_test/zone_config | 10 +- pkg/sql/set_zone_config.go | 133 +++++++++++++----- 5 files changed, 191 insertions(+), 42 deletions(-) diff --git a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_mixed_20.2_21.1 b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_mixed_20.2_21.1 index 1d344181e1ff..21f37f972e27 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_mixed_20.2_21.1 +++ b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_mixed_20.2_21.1 @@ -26,3 +26,21 @@ CREATE TABLE t() statement error cannot alter a table's LOCALITY if its database is not multi-region enabled ALTER TABLE t SET LOCALITY REGIONAL BY ROW + +statement error pgcode 0A000 global_reads cannot be used until cluster version is finalized +ALTER TABLE t CONFIGURE ZONE USING global_reads = true + +statement error pgcode 0A000 global_reads cannot be used until cluster version is finalized +ALTER TABLE t CONFIGURE ZONE USING global_reads = false + +statement error pgcode 0A000 num_voters cannot be used until cluster version is finalized +ALTER TABLE t CONFIGURE ZONE USING num_voters = 3 + +statement error pgcode 0A000 num_voters cannot be used until cluster version is finalized +ALTER TABLE t CONFIGURE ZONE USING num_voters = 0 + +statement error pgcode 0A000 voter_constraints cannot be used until cluster version is finalized +ALTER TABLE t CONFIGURE ZONE USING voter_constraints = '[+region=us-east-1]' + +statement error pgcode 0A000 voter_constraints cannot be used until cluster version is finalized +ALTER TABLE t CONFIGURE ZONE USING voter_constraints = '' diff --git a/pkg/ccl/logictestccl/testdata/logic_test/zone b/pkg/ccl/logictestccl/testdata/logic_test/zone index a1b7a76d0de3..274300d4d4b1 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/zone +++ b/pkg/ccl/logictestccl/testdata/logic_test/zone @@ -1126,3 +1126,32 @@ ALTER PARTITION p3 OF INDEX test.public.t@i2 CONFIGURE ZONE USING gc.ttlseconds = 15411; ALTER PARTITION p4 OF INDEX test.public.t@i2 CONFIGURE ZONE USING gc.ttlseconds = 15418 + +# Test that the global_reads attribute can be set with an enterprise license. +# This same test fails in pkg/sql/logictest/testdata/logic_test/zone_config. +statement ok +CREATE TABLE global (id INT PRIMARY KEY) + +statement ok +ALTER TABLE global CONFIGURE ZONE USING global_reads = true + +# This should reflect in the metrics. 
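+# Setting global_reads above should bump the
+# sql.schema.zone_config.table.global_reads telemetry counter, which the
+# query below asserts is now non-zero.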
+query T +SELECT feature_name FROM crdb_internal.feature_usage +WHERE feature_name IN ( + 'sql.schema.zone_config.table.global_reads' +) AND usage_count > 0 ORDER BY feature_name +---- +sql.schema.zone_config.table.global_reads + +query TT +SHOW ZONE CONFIGURATION FOR TABLE global +---- +TABLE global ALTER TABLE global CONFIGURE ZONE USING + range_min_bytes = 134217728, + range_max_bytes = 536870912, + gc.ttlseconds = 90000, + global_reads = true, + num_replicas = 7, + constraints = '[]', + lease_preferences = '[]' diff --git a/pkg/ccl/multiregionccl/multiregion_test.go b/pkg/ccl/multiregionccl/multiregion_test.go index 85710015de04..eb56bb1d92d6 100644 --- a/pkg/ccl/multiregionccl/multiregion_test.go +++ b/pkg/ccl/multiregionccl/multiregion_test.go @@ -117,3 +117,46 @@ func TestMultiRegionAfterEnterpriseDisabled(t *testing.T) { } }) } + +func TestGlobalReadsAfterEnterpriseDisabled(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + defer utilccl.TestingEnableEnterprise()() + + _, sqlDB, cleanup := multiregionccltestutils.TestingCreateMultiRegionCluster( + t, 1 /* numServers */, base.TestingKnobs{}, nil, /* baseDir */ + ) + defer cleanup() + + for _, setupQuery := range []string{ + `CREATE DATABASE test`, + `USE test`, + `CREATE TABLE t1 ()`, + `CREATE TABLE t2 ()`, + } { + _, err := sqlDB.Exec(setupQuery) + require.NoError(t, err) + } + + // Can set global_reads with enterprise license enabled. + _, err := sqlDB.Exec(`ALTER TABLE t1 CONFIGURE ZONE USING global_reads = true`) + require.NoError(t, err) + + _, err = sqlDB.Exec(`ALTER TABLE t2 CONFIGURE ZONE USING global_reads = true`) + require.NoError(t, err) + + // Can unset global_reads with enterprise license enabled. + _, err = sqlDB.Exec(`ALTER TABLE t1 CONFIGURE ZONE USING global_reads = false`) + require.NoError(t, err) + + defer utilccl.TestingDisableEnterprise()() + + // Cannot set global_reads with enterprise license disabled. + _, err = sqlDB.Exec(`ALTER TABLE t1 CONFIGURE ZONE USING global_reads = true`) + require.Error(t, err) + require.Regexp(t, "use of global_reads requires an enterprise license", err) + + // Can unset global_reads with enterprise license disabled. 
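+ // (The license check in checkAllowed only fires when global_reads is being
+ // set to true, so clearing the attribute still works without a license.)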
+ _, err = sqlDB.Exec(`ALTER TABLE t2 CONFIGURE ZONE USING global_reads = false`) + require.NoError(t, err) +} diff --git a/pkg/sql/logictest/testdata/logic_test/zone_config b/pkg/sql/logictest/testdata/logic_test/zone_config index c09367cb7473..6474b2d8bf14 100644 --- a/pkg/sql/logictest/testdata/logic_test/zone_config +++ b/pkg/sql/logictest/testdata/logic_test/zone_config @@ -75,7 +75,6 @@ ALTER TABLE a CONFIGURE ZONE USING range_min_bytes = 200000 + 1, range_max_bytes = 300000 + 1, gc.ttlseconds = 3000 + 600, - global_reads = true, num_replicas = floor(1.2)::int, constraints = '[+region=test]', lease_preferences = '[[+region=test]]' @@ -87,14 +86,12 @@ WHERE feature_name IN ( 'sql.schema.zone_config.table.range_min_bytes', 'sql.schema.zone_config.table.range_max_bytes', 'sql.schema.zone_config.table.gc.ttlseconds', - 'sql.schema.zone_config.table.global_reads', 'sql.schema.zone_config.table.num_replicas', 'sql.schema.zone_config.table.constraints' ) AND usage_count > 0 ORDER BY feature_name ---- sql.schema.zone_config.table.constraints sql.schema.zone_config.table.gc.ttlseconds -sql.schema.zone_config.table.global_reads sql.schema.zone_config.table.num_replicas sql.schema.zone_config.table.range_max_bytes sql.schema.zone_config.table.range_min_bytes @@ -106,7 +103,6 @@ SELECT zone_id, raw_config_sql FROM [SHOW ZONE CONFIGURATION FOR TABLE a] range_min_bytes = 200001, range_max_bytes = 300001, gc.ttlseconds = 3600, - global_reads = true, num_replicas = 1, constraints = '[+region=test]', lease_preferences = '[[+region=test]]' @@ -122,7 +118,6 @@ SELECT zone_id, raw_config_sql FROM [SHOW ZONE CONFIGURATION FOR TABLE a] range_min_bytes = 200001, range_max_bytes = 400000, gc.ttlseconds = 3600, - global_reads = true, num_replicas = 1, constraints = '[+region=test]', lease_preferences = '[[+region=test]]' @@ -159,7 +154,6 @@ SELECT zone_id, raw_config_sql FROM [SHOW ZONE CONFIGURATION FOR TABLE a] range_min_bytes = 200001, range_max_bytes = 400000, gc.ttlseconds = 3600, - global_reads = true, num_replicas = 1, constraints = '[+region=test]', lease_preferences = '[[+region=test]]' @@ -266,6 +260,10 @@ SELECT zone_id, raw_config_sql FROM [SHOW ZONE CONFIGURATION FOR TABLE a] voter_constraints = '{+region=test: 1}', lease_preferences = '[]' +# Check that global_reads cannot be set without a CCL binary and enterprise license. +statement error OSS binaries do not include enterprise features +ALTER TABLE a CONFIGURE ZONE USING global_reads = true + # Check entities for which we can set zone configs. 
subtest test_entity_validity diff --git a/pkg/sql/set_zone_config.go b/pkg/sql/set_zone_config.go index 88276b855908..a5e250fdb3a4 100644 --- a/pkg/sql/set_zone_config.go +++ b/pkg/sql/set_zone_config.go @@ -16,6 +16,7 @@ import ( "sort" "strings" + "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" @@ -63,37 +64,86 @@ type setZoneConfigNode struct { var supportedZoneConfigOptions = map[tree.Name]struct { requiredType *types.T setter func(*zonepb.ZoneConfig, tree.Datum) + checkAllowed func(context.Context, *ExecutorConfig, tree.Datum) error // optional }{ - "range_min_bytes": {types.Int, func(c *zonepb.ZoneConfig, d tree.Datum) { c.RangeMinBytes = proto.Int64(int64(tree.MustBeDInt(d))) }}, - "range_max_bytes": {types.Int, func(c *zonepb.ZoneConfig, d tree.Datum) { c.RangeMaxBytes = proto.Int64(int64(tree.MustBeDInt(d))) }}, - "global_reads": {types.Bool, func(c *zonepb.ZoneConfig, d tree.Datum) { c.GlobalReads = proto.Bool(bool(tree.MustBeDBool(d))) }}, - "num_replicas": {types.Int, func(c *zonepb.ZoneConfig, d tree.Datum) { c.NumReplicas = proto.Int32(int32(tree.MustBeDInt(d))) }}, - "num_voters": {types.Int, func(c *zonepb.ZoneConfig, d tree.Datum) { c.NumVoters = proto.Int32(int32(tree.MustBeDInt(d))) }}, - "gc.ttlseconds": {types.Int, func(c *zonepb.ZoneConfig, d tree.Datum) { - c.GC = &zonepb.GCPolicy{TTLSeconds: int32(tree.MustBeDInt(d))} - }}, - "constraints": {types.String, func(c *zonepb.ZoneConfig, d tree.Datum) { - constraintsList := zonepb.ConstraintsList{ - Constraints: c.Constraints, - Inherited: c.InheritedConstraints, - } - loadYAML(&constraintsList, string(tree.MustBeDString(d))) - c.Constraints = constraintsList.Constraints - c.InheritedConstraints = false - }}, - "voter_constraints": {types.String, func(c *zonepb.ZoneConfig, d tree.Datum) { - voterConstraintsList := zonepb.ConstraintsList{ - Constraints: c.VoterConstraints, - Inherited: c.InheritedVoterConstraints, - } - loadYAML(&voterConstraintsList, string(tree.MustBeDString(d))) - c.VoterConstraints = voterConstraintsList.Constraints - c.InheritedVoterConstraints = false - }}, - "lease_preferences": {types.String, func(c *zonepb.ZoneConfig, d tree.Datum) { - loadYAML(&c.LeasePreferences, string(tree.MustBeDString(d))) - c.InheritedLeasePreferences = false - }}, + "range_min_bytes": { + requiredType: types.Int, + setter: func(c *zonepb.ZoneConfig, d tree.Datum) { c.RangeMinBytes = proto.Int64(int64(tree.MustBeDInt(d))) }, + }, + "range_max_bytes": { + requiredType: types.Int, + setter: func(c *zonepb.ZoneConfig, d tree.Datum) { c.RangeMaxBytes = proto.Int64(int64(tree.MustBeDInt(d))) }, + }, + "global_reads": { + requiredType: types.Bool, + setter: func(c *zonepb.ZoneConfig, d tree.Datum) { c.GlobalReads = proto.Bool(bool(tree.MustBeDBool(d))) }, + checkAllowed: func(ctx context.Context, execCfg *ExecutorConfig, d tree.Datum) error { + if err := checkVersionActive(ctx, execCfg, clusterversion.NonVotingReplicas, "global_reads"); err != nil { + return err + } + if !tree.MustBeDBool(d) { + // Always allow the value to be unset. 
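+ // Unsetting only needs the version gate checked above, not a license.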
+ return nil + } + return base.CheckEnterpriseEnabled( + execCfg.Settings, + execCfg.ClusterID(), + execCfg.Organization(), + "global_reads", + ) + }, + }, + "num_replicas": { + requiredType: types.Int, + setter: func(c *zonepb.ZoneConfig, d tree.Datum) { c.NumReplicas = proto.Int32(int32(tree.MustBeDInt(d))) }, + }, + "num_voters": { + requiredType: types.Int, + setter: func(c *zonepb.ZoneConfig, d tree.Datum) { c.NumVoters = proto.Int32(int32(tree.MustBeDInt(d))) }, + checkAllowed: func(ctx context.Context, execCfg *ExecutorConfig, _ tree.Datum) error { + return checkVersionActive(ctx, execCfg, clusterversion.NonVotingReplicas, "num_voters") + }, + }, + "gc.ttlseconds": { + requiredType: types.Int, + setter: func(c *zonepb.ZoneConfig, d tree.Datum) { + c.GC = &zonepb.GCPolicy{TTLSeconds: int32(tree.MustBeDInt(d))} + }, + }, + "constraints": { + requiredType: types.String, + setter: func(c *zonepb.ZoneConfig, d tree.Datum) { + constraintsList := zonepb.ConstraintsList{ + Constraints: c.Constraints, + Inherited: c.InheritedConstraints, + } + loadYAML(&constraintsList, string(tree.MustBeDString(d))) + c.Constraints = constraintsList.Constraints + c.InheritedConstraints = false + }, + }, + "voter_constraints": { + requiredType: types.String, + setter: func(c *zonepb.ZoneConfig, d tree.Datum) { + voterConstraintsList := zonepb.ConstraintsList{ + Constraints: c.VoterConstraints, + Inherited: c.InheritedVoterConstraints, + } + loadYAML(&voterConstraintsList, string(tree.MustBeDString(d))) + c.VoterConstraints = voterConstraintsList.Constraints + c.InheritedVoterConstraints = false + }, + checkAllowed: func(ctx context.Context, execCfg *ExecutorConfig, _ tree.Datum) error { + return checkVersionActive(ctx, execCfg, clusterversion.NonVotingReplicas, "voter_constraints") + }, + }, + "lease_preferences": { + requiredType: types.String, + setter: func(c *zonepb.ZoneConfig, d tree.Datum) { + loadYAML(&c.LeasePreferences, string(tree.MustBeDString(d))) + c.InheritedLeasePreferences = false + }, + }, } // zoneOptionKeys contains the keys from suportedZoneConfigOptions in @@ -114,6 +164,16 @@ func loadYAML(dst interface{}, yamlString string) { } } +func checkVersionActive( + ctx context.Context, execCfg *ExecutorConfig, minVersion clusterversion.Key, option string, +) error { + if !execCfg.Settings.Version.IsActive(ctx, minVersion) { + return pgerror.Newf(pgcode.FeatureNotSupported, + "%s cannot be used until cluster version is finalized", option) + } + return nil +} + func (p *planner) SetZoneConfig(ctx context.Context, n *tree.SetZoneConfig) (planNode, error) { if err := checkSchemaChangeEnabled( ctx, @@ -182,11 +242,6 @@ func (p *planner) SetZoneConfig(ctx context.Context, n *tree.SetZoneConfig) (pla return nil, pgerror.Newf(pgcode.InvalidParameterValue, "unsupported zone config parameter: %q", tree.ErrString(&opt.Key)) } - if (opt.Key == "num_voters" || opt.Key == "voter_constraints") && - !p.ExecCfg().Settings.Version.IsActive(ctx, clusterversion.NonVotingReplicas) { - return nil, pgerror.Newf(pgcode.FeatureNotSupported, - "num_voters and voter_constraints cannot be used until cluster version is finalized") - } telemetry.Inc( sqltelemetry.SchemaSetZoneConfigCounter( n.ZoneSpecifier.TelemetryName(), @@ -334,7 +389,13 @@ func (n *setZoneConfigNode) startExec(params runParams) error { return pgerror.Newf(pgcode.InvalidParameterValue, "unsupported NULL value for %q", tree.ErrString(name)) } - setter := supportedZoneConfigOptions[*name].setter + opt := supportedZoneConfigOptions[*name] + if 
opt.checkAllowed != nil { + if err := opt.checkAllowed(params.ctx, params.ExecCfg(), datum); err != nil { + return err + } + } + setter := opt.setter setters = append(setters, func(c *zonepb.ZoneConfig) { setter(c, datum) }) optionsStr = append(optionsStr, fmt.Sprintf("%s = %s", name, datum)) } From a3ba07382326c7f44e04f5f947d83f48eb3fd37d Mon Sep 17 00:00:00 2001 From: Andy Yang Date: Mon, 29 Mar 2021 20:38:31 -0400 Subject: [PATCH 2/5] geo/geomfn: refactor logic for point in polygon optimization Release note: None --- pkg/geo/geomfn/BUILD.bazel | 1 + pkg/geo/geomfn/binary_predicates.go | 10 +- pkg/geo/geomfn/distance.go | 121 +--------- pkg/geo/geomfn/point_polygon_optimization.go | 222 +++++++++++++++++++ 4 files changed, 229 insertions(+), 125 deletions(-) create mode 100644 pkg/geo/geomfn/point_polygon_optimization.go diff --git a/pkg/geo/geomfn/BUILD.bazel b/pkg/geo/geomfn/BUILD.bazel index 8d832a92a342..7cbd74d6c95d 100644 --- a/pkg/geo/geomfn/BUILD.bazel +++ b/pkg/geo/geomfn/BUILD.bazel @@ -23,6 +23,7 @@ go_library( "make_geometry.go", "node.go", "orientation.go", + "point_polygon_optimization.go", "remove_repeated_points.go", "reverse.go", "segmentize.go", diff --git a/pkg/geo/geomfn/binary_predicates.go b/pkg/geo/geomfn/binary_predicates.go index 44ba23a79d4c..098a18b50a68 100644 --- a/pkg/geo/geomfn/binary_predicates.go +++ b/pkg/geo/geomfn/binary_predicates.go @@ -36,7 +36,7 @@ func Covers(a geo.Geometry, b geo.Geometry) (bool, error) { case PolygonAndPoint: // Computing whether a polygon covers a point is equivalent // to computing whether the point is covered by the polygon. - return PointKindRelatesToPolygonKind(pointKind, polygonKind, PointPolygonCoveredBy) + return PointKindCoveredByPolygonKind(pointKind, polygonKind) } return geos.Covers(a.EWKB(), b.EWKB()) @@ -58,7 +58,7 @@ func CoveredBy(a geo.Geometry, b geo.Geometry) (bool, error) { // A polygon cannot be covered by a point. return false, nil case PointAndPolygon: - return PointKindRelatesToPolygonKind(pointKind, polygonKind, PointPolygonCoveredBy) + return PointKindCoveredByPolygonKind(pointKind, polygonKind) } return geos.CoveredBy(a.EWKB(), b.EWKB()) @@ -82,7 +82,7 @@ func Contains(a geo.Geometry, b geo.Geometry) (bool, error) { case PolygonAndPoint: // Computing whether a polygon contains a point is equivalent // to computing whether the point is contained within the polygon. - return PointKindRelatesToPolygonKind(pointKind, polygonKind, PointPolygonWithin) + return PointKindWithinPolygonKind(pointKind, polygonKind) } return geos.Contains(a.EWKB(), b.EWKB()) @@ -148,7 +148,7 @@ func Intersects(a geo.Geometry, b geo.Geometry) (bool, error) { pointPolygonPair, pointKind, polygonKind := PointKindAndPolygonKind(a, b) switch pointPolygonPair { case PointAndPolygon, PolygonAndPoint: - return PointKindRelatesToPolygonKind(pointKind, polygonKind, PointPolygonIntersects) + return PointKindIntersectsPolygonKind(pointKind, polygonKind) } return geos.Intersects(a.EWKB(), b.EWKB()) @@ -335,7 +335,7 @@ func Within(a geo.Geometry, b geo.Geometry) (bool, error) { // A polygon cannot be contained within a point. 
return false, nil case PointAndPolygon: - return PointKindRelatesToPolygonKind(pointKind, polygonKind, PointPolygonWithin) + return PointKindWithinPolygonKind(pointKind, polygonKind) } return geos.Within(a.EWKB(), b.EWKB()) diff --git a/pkg/geo/geomfn/distance.go b/pkg/geo/geomfn/distance.go index 555dcae2ea34..3e064cb91ec1 100644 --- a/pkg/geo/geomfn/distance.go +++ b/pkg/geo/geomfn/distance.go @@ -635,7 +635,7 @@ func findPointSideOfLinearRing(point geodist.Point, linearRing geodist.LinearRin // See also: https://en.wikipedia.org/wiki/Nonzero-rule windingNumber := 0 p := point.GeomPoint - for edgeIdx := 0; edgeIdx < linearRing.NumEdges(); edgeIdx++ { + for edgeIdx, numEdges := 0, linearRing.NumEdges(); edgeIdx < numEdges; edgeIdx++ { e := linearRing.Edge(edgeIdx) eV0 := e.V0.GeomPoint eV1 := e.V1.GeomPoint @@ -836,125 +836,6 @@ func verifyDensifyFrac(f float64) error { return nil } -// PointPolygonRelationType defines a relationship type between -// a (multi)point and a (multi)polygon. -type PointPolygonRelationType int - -const ( - // PointPolygonIntersects is the relationship where a (multi)point - // intersects a (multi)polygon. - PointPolygonIntersects PointPolygonRelationType = iota + 1 - // PointPolygonCoveredBy is the relationship where a (multi)point - // is covered by a (multi)polygon. - PointPolygonCoveredBy - // PointPolygonWithin is the relationship where a (multi)point - // is contained within a (multi)polygon. - PointPolygonWithin -) - -// PointKindRelatesToPolygonKind returns whether a (multi)point and -// a (multi)polygon have the given relationship. -func PointKindRelatesToPolygonKind( - pointKind geo.Geometry, polygonKind geo.Geometry, relationType PointPolygonRelationType, -) (bool, error) { - pointKindBaseT, err := pointKind.AsGeomT() - if err != nil { - return false, err - } - polygonKindBaseT, err := polygonKind.AsGeomT() - if err != nil { - return false, err - } - pointKindIterator := geo.NewGeomTIterator(pointKindBaseT, geo.EmptyBehaviorOmit) - polygonKindIterator := geo.NewGeomTIterator(polygonKindBaseT, geo.EmptyBehaviorOmit) - - // TODO(ayang): Think about how to refactor these nested for loops - // Check whether each point intersects with at least one polygon. - // - For Intersects, at least one point must intersect with at least one polygon. - // - For CoveredBy, every point must intersect with at least one polygon. - // - For Within, every point must intersect with at least one polygon - // and at least one point must be inside at least one polygon. - intersectsOnce := false - insideOnce := false -pointOuterLoop: - for { - point, hasPoint, err := pointKindIterator.Next() - if err != nil { - return false, err - } - if !hasPoint { - break - } - // Reset the polygon iterator on each iteration of the point iterator. - polygonKindIterator.Reset() - curIntersects := false - for { - polygon, hasPolygon, err := polygonKindIterator.Next() - if err != nil { - return false, err - } - if !hasPolygon { - break - } - pointSide, err := findPointSideOfPolygon(point, polygon) - if err != nil { - return false, err - } - switch pointSide { - case insideLinearRing: - insideOnce = true - switch relationType { - case PointPolygonWithin: - continue pointOuterLoop - } - fallthrough - case onLinearRing: - intersectsOnce = true - curIntersects = true - switch relationType { - case PointPolygonIntersects: - // A single intersection is sufficient. - return true, nil - case PointPolygonCoveredBy: - // If the current point intersects, check the next point. 
- continue pointOuterLoop - case PointPolygonWithin: - // We can only skip to the next point if we have already seen a point - // that is inside the (multi)polygon. - if insideOnce { - continue pointOuterLoop - } - default: - return false, errors.Newf("unknown PointPolygonRelationType") - } - case outsideLinearRing: - default: - return false, errors.Newf("findPointSideOfPolygon returned unknown linearRingSide %d", pointSide) - } - } - // Case where a point in the (multi)point does not intersect - // a polygon in the (multi)polygon. - switch relationType { - case PointPolygonCoveredBy: - // Each point in a (multi)point must intersect a polygon in the - // (multi)point to be covered by it. - return false, nil - case PointPolygonWithin: - if !curIntersects { - return false, nil - } - } - } - switch relationType { - case PointPolygonCoveredBy: - return intersectsOnce, nil - case PointPolygonWithin: - return insideOnce, nil - default: - return false, nil - } -} - // findPointSideOfPolygon returns whether a point intersects with a polygon. func findPointSideOfPolygon(point geom.T, polygon geom.T) (linearRingSide, error) { // Convert point from a geom.T to a *geodist.Point. diff --git a/pkg/geo/geomfn/point_polygon_optimization.go b/pkg/geo/geomfn/point_polygon_optimization.go new file mode 100644 index 000000000000..eaffa2b27cf5 --- /dev/null +++ b/pkg/geo/geomfn/point_polygon_optimization.go @@ -0,0 +1,222 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package geomfn + +import ( + "github.com/cockroachdb/cockroach/pkg/geo" + "github.com/cockroachdb/errors" +) + +// PointPolygonControlFlowType signals what control flow to follow. +type PointPolygonControlFlowType int + +const ( + // PPCFCheckNextPolygon signals that the current point should be checked + // against the next polygon. + PPCFCheckNextPolygon PointPolygonControlFlowType = iota + // PPCFSkipToNextPoint signals that the rest of the checking for the current + // point can be skipped. + PPCFSkipToNextPoint + // PPCFReturnTrue signals that the function should exit early and return true. + PPCFReturnTrue +) + +// PointInPolygonEventListener is an interface implemented for each +// binary predicate making use of the point in polygon optimization +// to specify the behavior in pointKindRelatesToPolygonKind. +type PointInPolygonEventListener interface { + // OnPointIntersectsPolygon returns whether the function should exit and + // return true, skip to the next point, or check the current point against + // the next polygon in the case where a point intersects with a polygon. + // The strictlyInside param signifies whether the point is strictly inside + // or on the boundary of the polygon. + OnPointIntersectsPolygon(strictlyInside bool) PointPolygonControlFlowType + // OnPointDoesNotIntersect returns whether the function should early exit and + // return false in the case where a point does not intersect any polygon. + ExitIfPointDoesNotIntersect() bool + // AfterPointPolygonLoops returns the bool to return after the point-polygon + // loops have finished. + AfterPointPolygonLoops() bool +} + +// For Intersects, at least one point must intersect with at least one polygon. 
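+// The listener can therefore short-circuit: the first intersection it sees
+// returns PPCFReturnTrue.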
+type intersectsPIPEventListener struct{} + +func (el *intersectsPIPEventListener) OnPointIntersectsPolygon( + strictlyInside bool, +) PointPolygonControlFlowType { + // A single intersection is sufficient. + return PPCFReturnTrue +} + +func (el *intersectsPIPEventListener) ExitIfPointDoesNotIntersect() bool { + return false +} + +func (el *intersectsPIPEventListener) AfterPointPolygonLoops() bool { + return false +} + +var _ PointInPolygonEventListener = (*intersectsPIPEventListener)(nil) + +func newIntersectsPIPEventListener() *intersectsPIPEventListener { + return &intersectsPIPEventListener{} +} + +// For CoveredBy, every point must intersect with at least one polygon. +type coveredByPIPEventListener struct { + intersectsOnce bool +} + +func (el *coveredByPIPEventListener) OnPointIntersectsPolygon( + strictlyInside bool, +) PointPolygonControlFlowType { + // If the current point intersects, check the next point. + el.intersectsOnce = true + return PPCFSkipToNextPoint +} + +func (el *coveredByPIPEventListener) ExitIfPointDoesNotIntersect() bool { + // Each point in a (multi)point must intersect a polygon in the + // (multi)point to be covered by it. + return true +} + +func (el *coveredByPIPEventListener) AfterPointPolygonLoops() bool { + return el.intersectsOnce +} + +var _ PointInPolygonEventListener = (*coveredByPIPEventListener)(nil) + +func newCoveredByPIPEventListener() *coveredByPIPEventListener { + return &coveredByPIPEventListener{intersectsOnce: false} +} + +// For Within, every point must intersect with at least one polygon. +type withinPIPEventListener struct { + insideOnce bool +} + +func (el *withinPIPEventListener) OnPointIntersectsPolygon( + strictlyInside bool, +) PointPolygonControlFlowType { + // We can only skip to the next point if we have already seen a point + // that is inside the (multi)polygon. + if el.insideOnce { + return PPCFSkipToNextPoint + } + if strictlyInside { + el.insideOnce = true + return PPCFSkipToNextPoint + } + return PPCFCheckNextPolygon +} + +func (el *withinPIPEventListener) ExitIfPointDoesNotIntersect() bool { + // Each point in a (multi)point must intersect a polygon in the + // (multi)polygon to be contained within it. + return true +} + +func (el *withinPIPEventListener) AfterPointPolygonLoops() bool { + return el.insideOnce +} + +var _ PointInPolygonEventListener = (*withinPIPEventListener)(nil) + +func newWithinPIPEventListener() *withinPIPEventListener { + return &withinPIPEventListener{insideOnce: false} +} + +// PointKindIntersectsPolygonKind returns whether a (multi)point +// and a (multi)polygon intersect. +func PointKindIntersectsPolygonKind( + pointKind geo.Geometry, polygonKind geo.Geometry, +) (bool, error) { + return pointKindRelatesToPolygonKind(pointKind, polygonKind, newIntersectsPIPEventListener()) +} + +// PointKindCoveredByPolygonKind returns whether a (multi)point +// is covered by a (multi)polygon. +func PointKindCoveredByPolygonKind(pointKind geo.Geometry, polygonKind geo.Geometry) (bool, error) { + return pointKindRelatesToPolygonKind(pointKind, polygonKind, newCoveredByPIPEventListener()) +} + +// PointKindWithinPolygonKind returns whether a (multi)point +// is contained within a (multi)polygon. +func PointKindWithinPolygonKind(pointKind geo.Geometry, polygonKind geo.Geometry) (bool, error) { + return pointKindRelatesToPolygonKind(pointKind, polygonKind, newWithinPIPEventListener()) +} + +// pointKindRelatesToPolygonKind returns whether a (multi)point +// and a (multi)polygon have the given relationship. 
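+// The eventListener decides, for each point/polygon pair, whether to return
+// early, skip to the next point, or keep checking polygons, and it supplies
+// the final result once both loops complete.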
+func pointKindRelatesToPolygonKind( + pointKind geo.Geometry, polygonKind geo.Geometry, eventListener PointInPolygonEventListener, +) (bool, error) { + pointKindBaseT, err := pointKind.AsGeomT() + if err != nil { + return false, err + } + polygonKindBaseT, err := polygonKind.AsGeomT() + if err != nil { + return false, err + } + pointKindIterator := geo.NewGeomTIterator(pointKindBaseT, geo.EmptyBehaviorOmit) + polygonKindIterator := geo.NewGeomTIterator(polygonKindBaseT, geo.EmptyBehaviorOmit) + + // Check whether each point intersects with at least one polygon. + // The behavior for each predicate is dictated by eventListener. +pointOuterLoop: + for { + point, hasPoint, err := pointKindIterator.Next() + if err != nil { + return false, err + } + if !hasPoint { + break + } + // Reset the polygon iterator on each iteration of the point iterator. + polygonKindIterator.Reset() + curIntersects := false + for { + polygon, hasPolygon, err := polygonKindIterator.Next() + if err != nil { + return false, err + } + if !hasPolygon { + break + } + pointSide, err := findPointSideOfPolygon(point, polygon) + if err != nil { + return false, err + } + switch pointSide { + case insideLinearRing, onLinearRing: + curIntersects = true + strictlyInside := pointSide == insideLinearRing + switch eventListener.OnPointIntersectsPolygon(strictlyInside) { + case PPCFCheckNextPolygon: + case PPCFSkipToNextPoint: + continue pointOuterLoop + case PPCFReturnTrue: + return true, nil + } + case outsideLinearRing: + default: + return false, errors.Newf("findPointSideOfPolygon returned unknown linearRingSide %d", pointSide) + } + } + if !curIntersects && eventListener.ExitIfPointDoesNotIntersect() { + return false, nil + } + } + return eventListener.AfterPointPolygonLoops(), nil +} From 198fa1adf1b1eee2c3ccdd6837b785d5eef49827 Mon Sep 17 00:00:00 2001 From: Faizan Qazi Date: Mon, 22 Mar 2021 09:41:04 -0400 Subject: [PATCH 3/5] sql: lease acquisition of OFFLINE descs may starve bulk operations Fixes: #61798 Previously, offline descriptors would never have their leases cached and they would be released once the reference count hit zero. This was inadequate because when attempting to online these tables again the lease acquisition could be pushed back by other operations, leading to starvation / live locks. To address this, this patch will allow the leases of offline descriptors to be cached. Release note (bug fix): Lease acquisitions of descriptor in a offline state may starve out bulk operations (backup / restore) --- pkg/sql/catalog/descs/collection.go | 2 +- pkg/sql/catalog/lease/BUILD.bazel | 4 + pkg/sql/catalog/lease/lease.go | 52 +++++++--- pkg/sql/catalog/lease/lease_test.go | 148 +++++++++++++++++++++++++++- 4 files changed, 189 insertions(+), 17 deletions(-) diff --git a/pkg/sql/catalog/descs/collection.go b/pkg/sql/catalog/descs/collection.go index 69f9a225bcb1..670c3f78f66d 100644 --- a/pkg/sql/catalog/descs/collection.go +++ b/pkg/sql/catalog/descs/collection.go @@ -282,7 +282,7 @@ func (tc *Collection) getLeasedDescriptorByName( // Read the descriptor from the store in the face of some specific errors // because of a known limitation of AcquireByName. See the known // limitations of AcquireByName for details. 
- if catalog.HasInactiveDescriptorError(err) || + if (catalog.HasInactiveDescriptorError(err) && errors.Is(err, catalog.ErrDescriptorDropped)) || errors.Is(err, catalog.ErrDescriptorNotFound) { return nil, true, nil } diff --git a/pkg/sql/catalog/lease/BUILD.bazel b/pkg/sql/catalog/lease/BUILD.bazel index a8d15c800937..d57ab1c93cba 100644 --- a/pkg/sql/catalog/lease/BUILD.bazel +++ b/pkg/sql/catalog/lease/BUILD.bazel @@ -57,6 +57,7 @@ go_test( "//pkg/jobs/jobspb", "//pkg/keys", "//pkg/kv", + "//pkg/kv/kvserver", "//pkg/roachpb", "//pkg/security", "//pkg/security/securitytest", @@ -70,7 +71,9 @@ go_test( "//pkg/sql/catalog/tabledesc", "//pkg/sql/pgwire/pgcode", "//pkg/sql/sem/tree", + "//pkg/sql/sessiondata", "//pkg/sql/sqltestutils", + "//pkg/sql/sqlutil", "//pkg/sql/tests", "//pkg/testutils", "//pkg/testutils/serverutils", @@ -87,6 +90,7 @@ go_test( "//pkg/util/syncutil", "//pkg/util/timeutil", "//pkg/util/tracing", + "//pkg/util/uuid", "@com_github_cockroachdb_cockroach_go//crdb", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_logtags//:logtags", diff --git a/pkg/sql/catalog/lease/lease.go b/pkg/sql/catalog/lease/lease.go index 3395eaadd7c6..5f92487fbf45 100644 --- a/pkg/sql/catalog/lease/lease.go +++ b/pkg/sql/catalog/lease/lease.go @@ -219,7 +219,7 @@ func (s storage) acquire( return err } if err := catalog.FilterDescriptorState( - desc, tree.CommonLookupFlags{}, // filter all non-public state + desc, tree.CommonLookupFlags{IncludeOffline: true}, // filter dropped only ); err != nil { return err } @@ -984,7 +984,7 @@ func purgeOldVersions( ctx context.Context, db *kv.DB, id descpb.ID, - takenOffline bool, + dropped bool, minVersion descpb.DescriptorVersion, m *Manager, ) error { @@ -998,15 +998,15 @@ func purgeOldVersions( } empty := len(t.mu.active.data) == 0 && t.mu.acquisitionsInProgress == 0 t.mu.Unlock() - if empty && !takenOffline { + if empty && !dropped { // We don't currently have a version on this descriptor, so no need to refresh // anything. return nil } - removeInactives := func(takenOffline bool) { + removeInactives := func(dropped bool) { t.mu.Lock() - t.mu.takenOffline = takenOffline + t.mu.takenOffline = dropped leases := t.removeInactiveVersions() t.mu.Unlock() for _, l := range leases { @@ -1014,8 +1014,8 @@ func purgeOldVersions( } } - if takenOffline { - removeInactives(true /* takenOffline */) + if dropped { + removeInactives(true /* dropped */) return nil } @@ -1031,7 +1031,7 @@ func purgeOldVersions( return errRenewLease } newest.incRefcount() - removeInactives(false /* takenOffline */) + removeInactives(false /* dropped */) s, err := t.release(newest.Descriptor, m.removeOnceDereferenced()) if err != nil { return err @@ -1405,6 +1405,28 @@ func (m *Manager) AcquireByName( parentSchemaID descpb.ID, name string, ) (catalog.Descriptor, hlc.Timestamp, error) { + // When offline descriptor leases were not allowed to be cached, + // attempt to acquire a lease on them would generate a descriptor + // offline error. Recent changes allow offline descriptor leases + // to be cached, but callers still need the offline error generated. + // This logic will release the lease (the lease manager will still + // cache it), and generate the offline descriptor error. 
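+ // FilterDescriptorState is invoked with empty CommonLookupFlags, so an
+ // OFFLINE descriptor surfaces the same error callers saw previously.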
+ validateDescriptorForReturn := func(desc catalog.Descriptor, + expiration hlc.Timestamp) (catalog.Descriptor, hlc.Timestamp, error) { + if desc.Offline() { + if err := catalog.FilterDescriptorState( + desc, tree.CommonLookupFlags{}, + ); err != nil { + err2 := m.Release(desc) + if err2 != nil { + log.Warningf(ctx, "error releasing lease: %s", err2) + } + return nil, hlc.Timestamp{}, err + } + } + return desc, expiration, nil + } + // Check if we have cached an ID for this name. descVersion := m.names.get(parentID, parentSchemaID, name, timestamp) if descVersion != nil { @@ -1419,7 +1441,7 @@ func (m *Manager) AcquireByName( } } } - return descVersion.Descriptor, descVersion.expiration, nil + return validateDescriptorForReturn(descVersion.Descriptor, descVersion.expiration) } if err := m.Release(descVersion); err != nil { return nil, hlc.Timestamp{}, err @@ -1429,7 +1451,7 @@ func (m *Manager) AcquireByName( if err != nil { return nil, hlc.Timestamp{}, err } - return desc, expiration, nil + return validateDescriptorForReturn(desc, expiration) } // We failed to find something in the cache, or what we found is not @@ -1498,7 +1520,7 @@ func (m *Manager) AcquireByName( return nil, hlc.Timestamp{}, catalog.ErrDescriptorNotFound } } - return desc, expiration, nil + return validateDescriptorForReturn(desc, expiration) } // resolveName resolves a descriptor name to a descriptor ID at a particular @@ -1713,11 +1735,11 @@ func (m *Manager) refreshLeases( } id, version, name, state := descpb.GetDescriptorMetadata(desc) - goingOffline := state == descpb.DescriptorState_DROP || state == descpb.DescriptorState_OFFLINE + dropped := state == descpb.DescriptorState_DROP // Try to refresh the lease to one >= this version. - log.VEventf(ctx, 2, "purging old version of descriptor %d@%d (offline %v)", - id, version, goingOffline) - if err := purgeOldVersions(ctx, db, id, goingOffline, version, m); err != nil { + log.VEventf(ctx, 2, "purging old version of descriptor %d@%d (dropped %v)", + id, version, dropped) + if err := purgeOldVersions(ctx, db, id, dropped, version, m); err != nil { log.Warningf(ctx, "error purging leases for descriptor %d(%s): %s", id, name, err) } diff --git a/pkg/sql/catalog/lease/lease_test.go b/pkg/sql/catalog/lease/lease_test.go index 0d4e892e8c76..6914f4e30779 100644 --- a/pkg/sql/catalog/lease/lease_test.go +++ b/pkg/sql/catalog/lease/lease_test.go @@ -27,7 +27,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" @@ -39,7 +41,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqltestutils" + "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -50,10 +54,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" 
+ "github.com/cockroachdb/cockroach/pkg/util/retry" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/tracing" + "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" "github.com/cockroachdb/logtags" "github.com/lib/pq" @@ -2361,6 +2367,7 @@ func TestLeaseWithOfflineTables(t *testing.T) { func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { flags := tree.ObjectLookupFlagsWithRequiredTableKind(tree.ResolveRequireTableDesc) flags.CommonLookupFlags.IncludeOffline = true + flags.CommonLookupFlags.IncludeDropped = true desc, err := descsCol.GetMutableTableByID(ctx, txn, testTableID(), flags) require.NoError(t, err) require.Equal(t, desc.State, expected) @@ -2404,9 +2411,16 @@ func TestLeaseWithOfflineTables(t *testing.T) { checkLeaseState(true /* shouldBePresent */) // Take the table offline and back online again. - // This should relinquish the lease. + // This should not relinquish the lease anymore + // and offline ones will now be held. setTableState(descpb.DescriptorState_PUBLIC, descpb.DescriptorState_OFFLINE) setTableState(descpb.DescriptorState_OFFLINE, descpb.DescriptorState_PUBLIC) + checkLeaseState(true /* shouldBePresent */) + + // Take the table dropped and back online again. + // This should relinquish the lease. + setTableState(descpb.DescriptorState_PUBLIC, descpb.DescriptorState_DROP) + setTableState(descpb.DescriptorState_DROP, descpb.DescriptorState_PUBLIC) checkLeaseState(false /* shouldBePresent */) // Query the table, thereby acquiring a lease once again. @@ -2709,3 +2723,135 @@ func TestDropDescriptorRacesWithAcquisition(t *testing.T) { return true }) } + +// TestOfflineLeaseRefresh validates that no live lock can occur, +// after a table is brought offline. Specifically a table a will be +// brought offline, and then one transaction will attempt to bring it +// online while another transaction will attempt to do a read. The read +// transaction could previously push back the lease of transaction +// trying to online the table perpetually (as seen in issue #61798). +func TestOfflineLeaseRefresh(t *testing.T) { + defer leaktest.AfterTest(t)() + ctx := context.Background() + waitForTxn := make(chan chan struct{}) + waitForRqstFilter := make(chan chan struct{}) + errorChan := make(chan error) + var txnID uuid.UUID + var mu syncutil.RWMutex + + knobs := &kvserver.StoreTestingKnobs{ + TestingRequestFilter: func(ctx context.Context, req roachpb.BatchRequest) *roachpb.Error { + mu.RLock() + checkRequest := req.Txn != nil && req.Txn.ID.Equal(txnID) + mu.RUnlock() + if _, ok := req.GetArg(roachpb.EndTxn); checkRequest && ok { + notify := make(chan struct{}) + waitForRqstFilter <- notify + <-notify + } + return nil + }, + } + params := base.TestServerArgs{Knobs: base.TestingKnobs{Store: knobs}} + tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ServerArgs: params}) + s := tc.Server(0) + defer tc.Stopper().Stop(ctx) + conn := tc.ServerConn(0) + + // Create t1 that will be offline, and t2, + // that will serve inserts. 
+ _, err := conn.Exec(` +CREATE DATABASE d1; +CREATE TABLE d1.t1 (name int); +INSERT INTO d1.t1 values(5); +INSERT INTO d1.t1 values(5); +INSERT INTO d1.t1 values(5); +CREATE TABLE d1.t2 (name int); +`) + require.NoError(t, err) + + tableID := descpb.InvalidID + + // Force the table descriptor into a offline state + err = descs.Txn(ctx, s.ClusterSettings(), s.LeaseManager().(*lease.Manager), s.InternalExecutor().(sqlutil.InternalExecutor), s.DB(), + func(ctx context.Context, txn *kv.Txn, descriptors *descs.Collection) error { + _, tableDesc, err := descriptors.GetMutableTableByName(ctx, txn, tree.NewTableNameWithSchema("d1", "public", "t1"), tree.ObjectLookupFlagsWithRequired()) + if err != nil { + return err + } + tableDesc.SetOffline("For unit test") + err = descriptors.WriteDesc(ctx, false, tableDesc, txn) + if err != nil { + return err + } + tableID = tableDesc.ID + return nil + }) + require.NoError(t, err) + + _, err = s.LeaseManager().(*lease.Manager).WaitForOneVersion(ctx, tableID, retry.Options{}) + require.NoError(t, err) + + go func() { + err := descs.Txn(ctx, s.ClusterSettings(), s.LeaseManager().(*lease.Manager), + s.InternalExecutor().(sqlutil.InternalExecutor), s.DB(), + func(ctx context.Context, txn *kv.Txn, descriptors *descs.Collection) error { + close(waitForRqstFilter) + mu.Lock() + waitForRqstFilter = make(chan chan struct{}) + txnID = txn.ID() + mu.Unlock() + + // Online the descriptor by making it public + _, tableDesc, err := descriptors.GetMutableTableByName(ctx, txn, + tree.NewTableNameWithSchema("d1", "public", "t1"), + tree.ObjectLookupFlags{CommonLookupFlags: tree.CommonLookupFlags{ + Required: true, + RequireMutable: true, + IncludeOffline: true, + AvoidCached: true, + }}) + if err != nil { + return err + } + tableDesc.SetPublic() + err = descriptors.WriteDesc(ctx, false, tableDesc, txn) + if err != nil { + return err + } + // Allow the select on the table to proceed, + // so that it waits on the channel at the appropriate + // moment. + notify := make(chan struct{}) + waitForTxn <- notify + <-notify + + // Select from an unrelated table + _, err = s.InternalExecutor().(sqlutil.InternalExecutor).ExecEx(ctx, "inline-exec", txn, + sessiondata.InternalExecutorOverride{User: security.RootUserName()}, + "insert into d1.t2 values (10);") + return err + + }) + close(waitForTxn) + close(waitForRqstFilter) + errorChan <- err + }() + + for notify := range waitForTxn { + close(notify) + mu.RLock() + rqstFilterChannel := waitForRqstFilter + mu.RUnlock() + for notify2 := range rqstFilterChannel { + // Push the query trying to online the table out by + // leasing out the table again + _, err = conn.Query("select * from d1.t1") + require.EqualError(t, err, "pq: relation \"t1\" is offline: For unit test", + "Table offline error was not generated as expected") + close(notify2) + } + } + require.NoError(t, <-errorChan) + close(errorChan) +} From 4766afcae520eec998a90c6bfd012a5126994a02 Mon Sep 17 00:00:00 2001 From: Ricky Stewart Date: Tue, 30 Mar 2021 12:58:35 -0500 Subject: [PATCH 4/5] build: upgrade go to 1.15.10 Pick up a patch to a compiler bug (see #62521). * [ ] Adjust the Pebble tests to run in new version. * [x] Adjust version in Docker image ([source](./builder/Dockerfile)). * [x] Rebuild and push the Docker image (following [Basic Process](#basic-process)) * [x] Bump the version in `WORKSPACE` under `go_register_toolchains`. You may need to bump [rules_go](https://github.com/bazelbuild/rules_go/releases). 
* [x] Bump the version in `builder.sh` accordingly ([source](./builder.sh#L6)). * [ ] Bump the version in `go-version-check.sh` ([source](./go-version-check.sh)), unless bumping to a new patch release. * [ ] Bump the go version in `go.mod`. You may also need to rerun `make vendor_rebuild` if vendoring has changed. * [x] Bump the default installed version of Go in `bootstrap-debian.sh` ([source](./bootstrap/bootstrap-debian.sh#L40-42)). * [x] Replace other mentions of the older version of go (grep for `golang:` and `go`). * [ ] Update the `builder.dockerImage` parameter in the TeamCity [`Cockroach`](https://teamcity.cockroachdb.com/admin/editProject.html?projectId=Cockroach&tab=projectParams) and [`Internal`](https://teamcity.cockroachdb.com/admin/editProject.html?projectId=Internal&tab=projectParams) projects. * [ ] Adjust `GO_VERSION` in the TeamCity agent image ([setup script](./packer/teamcity-agent.sh)) and ask the Developer Infrastructure team to deploy new images. Resolves #62809. Release note (general change): Upgrade the CRDB binary to Go 1.15.10. --- WORKSPACE | 2 +- build/README.md | 2 +- build/bootstrap/bootstrap-debian.sh | 4 ++-- build/builder.sh | 2 +- build/builder/Dockerfile | 4 ++-- build/packer/teamcity-agent.sh | 23 ++++++++++++----------- 6 files changed, 19 insertions(+), 18 deletions(-) diff --git a/WORKSPACE b/WORKSPACE index a70970f8b559..f2bf027f2790 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -98,7 +98,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe go_rules_dependencies() -go_register_toolchains(go_version = "1.15.6") +go_register_toolchains(go_version = "1.15.10") # Configure nodeJS. load("@build_bazel_rules_nodejs//:index.bzl", "yarn_install") diff --git a/build/README.md b/build/README.md index 7142f7ee4eb1..9698d70a00fd 100644 --- a/build/README.md +++ b/build/README.md @@ -80,7 +80,7 @@ Please copy this checklist (based on [Basic Process](#basic-process)) into the r back to this document and perform these steps: * [ ] Adjust the Pebble tests to run in new version. -* [ ] Adjust version in Docker image ([source](./builder/Dockerfile#L199-L200)). +* [ ] Adjust version in Docker image ([source](./builder/Dockerfile)). * [ ] Rebuild and push the Docker image (following [Basic Process](#basic-process)) * [ ] Bump the version in `WORKSPACE` under `go_register_toolchains`. You may need to bump [rules_go](https://github.com/bazelbuild/rules_go/releases). * [ ] Bump the version in `builder.sh` accordingly ([source](./builder.sh#L6)). diff --git a/build/bootstrap/bootstrap-debian.sh b/build/bootstrap/bootstrap-debian.sh index 8bd7cfb91160..07dea0a03ca5 100755 --- a/build/bootstrap/bootstrap-debian.sh +++ b/build/bootstrap/bootstrap-debian.sh @@ -46,9 +46,9 @@ sudo tar -C /usr -zxf /tmp/cmake.tgz && rm /tmp/cmake.tgz # Install Go. 
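# The trap below removes the downloaded tarball on exit, whether or not the
# install succeeds.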
trap 'rm -f /tmp/go.tgz' EXIT -curl -fsSL https://dl.google.com/go/go1.15.6.linux-amd64.tar.gz > /tmp/go.tgz +curl -fsSL https://dl.google.com/go/go1.15.10.linux-amd64.tar.gz > /tmp/go.tgz sha256sum -c - < /etc/apt/sources.list.d/docker.list < /tmp/go.tgz +sha256sum -c - < Date: Tue, 30 Mar 2021 13:34:10 -0700 Subject: [PATCH 5/5] schemaexpr: remove inapplicable TODO Release note: None --- pkg/sql/catalog/schemaexpr/default_exprs.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/sql/catalog/schemaexpr/default_exprs.go b/pkg/sql/catalog/schemaexpr/default_exprs.go index ef8622adc106..98dd54353a29 100644 --- a/pkg/sql/catalog/schemaexpr/default_exprs.go +++ b/pkg/sql/catalog/schemaexpr/default_exprs.go @@ -26,7 +26,6 @@ import ( // The length of the result slice matches the length of the input column descriptors. // For every column that has no default expression, a NULL expression is reported // as default. -// TODO(mgartner): Move this to the schemaexpr package. func MakeDefaultExprs( ctx context.Context, cols []descpb.ColumnDescriptor,