diff --git a/Gopkg.lock b/Gopkg.lock index caa3a743830..7d82e3b41d0 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -194,6 +194,14 @@ revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" version = "v1.1.0" +[[projects]] + branch = "master" + digest = "1:7f114b78210bf5b75f307fc97cff293633c835bab1e0ea8a744a44b39c042dfe" + name = "github.com/golang/snappy" + packages = ["."] + pruneopts = "NUT" + revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" + [[projects]] branch = "master" digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107" @@ -313,9 +321,25 @@ pruneopts = "NUT" revision = "1c287c953996ab3a0bf535dba9d53d809d3dc0b6" +[[projects]] + digest = "1:cffc7d72991e2d84e164a07a0fabbc67a3fce70745d425e41245358a0c107d66" + name = "github.com/pingcap/errcode" + packages = ["."] + pruneopts = "NUT" + revision = "a1a7271709d9bea2d0a0d2aab8a97d7a2ce0ea51" + version = "v0.1" + +[[projects]] + digest = "1:677ff2ee188099669fbecf583de8c23111020adee48d71a6f60c733f6b2fea7a" + name = "github.com/pingcap/errors" + packages = ["."] + pruneopts = "NUT" + revision = "31ffda8a65b0f910c6d65f1fef40e761fd606384" + version = "v0.10.1" + [[projects]] branch = "master" - digest = "1:3be99496a5d1fc018eb176c53a81df986408af94bece871d829eaceddf7c5325" + digest = "1:94d3fa0d66021a886bcc2cb35fad3571afc5d9cda8f295bcf05b0c15a86c8520" name = "github.com/pingcap/kvproto" packages = [ "pkg/eraftpb", @@ -323,7 +347,8 @@ "pkg/pdpb", ] pruneopts = "NUT" - revision = "279515615485b0f2d12f1421cc412fe2784e0190" + revision = "fae11119f066d7fed2ae3d785fedd9f446143f70" + source = "https://github.com/pingcap/kvproto.git" [[projects]] digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121" @@ -410,6 +435,27 @@ revision = "583c0c0531f06d5278b7d917446061adc344b5cd" version = "v1.0.1" +[[projects]] + branch = "master" + digest = "1:f2ffd421680b0a3f7887501b3c6974bcf19217ecd301d0e2c9b681940ec363d5" + name = "github.com/syndtr/goleveldb" + packages = [ + "leveldb", + "leveldb/cache", + "leveldb/comparer", + "leveldb/errors", + "leveldb/filter", + "leveldb/iterator", + "leveldb/journal", + "leveldb/memdb", + "leveldb/opt", + "leveldb/storage", + "leveldb/table", + "leveldb/util", + ] + pruneopts = "NUT" + revision = "ae2bd5eed72d46b28834ec3f60db3a3ebedd8dbd" + [[projects]] branch = "master" digest = "1:9fbba9f266710aae2dfe8bcdb24703b25ada0512623cabc96efb99bc7d579aef" @@ -561,7 +607,7 @@ revision = "86e600f69ee4704c6efbf6a2a40a5c10700e76c2" [[projects]] - digest = "1:7c485de3fafb9d0220cd8a5a53dea873b7a7771d88ad894998fc5b2a1a0b3b35" + digest = "1:0547cc571133c0dcc6422e5c53b72aafaa0ba3f14bcf33b2fd16ac5f3ea11d33" name = "google.golang.org/grpc" packages = [ ".", @@ -592,9 +638,9 @@ "transport", ] pruneopts = "NUT" - revision = "41344da2231b913fa3d983840a57a6b1b7b631a1" + revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b" source = "https://github.com/grpc/grpc-go.git" - version = "v1.12.0" + version = "v1.12.2" [[projects]] digest = "1:80c34337e8a734e190f2d1b716cae774cca74db98315166f92074434e9af0227" @@ -640,6 +686,7 @@ "github.com/montanaflynn/stats", "github.com/opentracing/opentracing-go", "github.com/pingcap/check", + "github.com/pingcap/errcode", "github.com/pingcap/kvproto/pkg/eraftpb", "github.com/pingcap/kvproto/pkg/metapb", "github.com/pingcap/kvproto/pkg/pdpb", @@ -649,6 +696,8 @@ "github.com/sirupsen/logrus", "github.com/spf13/cobra", "github.com/spf13/pflag", + "github.com/syndtr/goleveldb/leveldb", + "github.com/syndtr/goleveldb/leveldb/util", "github.com/unrolled/render", 
"github.com/urfave/negroni", "go.uber.org/zap", @@ -656,6 +705,7 @@ "google.golang.org/grpc", "google.golang.org/grpc/codes", "google.golang.org/grpc/credentials", + "google.golang.org/grpc/grpclog", "google.golang.org/grpc/status", "gopkg.in/natefinch/lumberjack.v2", ] diff --git a/Gopkg.toml b/Gopkg.toml index 101eeb18673..dde7d1ae008 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -18,7 +18,7 @@ [[constraint]] name = "google.golang.org/grpc" source = "https://github.com/grpc/grpc-go.git" - version = "1.12.0" + version = "=1.12.2" [[constraint]] name = "github.com/chzyer/readline" @@ -26,6 +26,7 @@ [[constraint]] name = "github.com/pingcap/kvproto" + source = "https://github.com/pingcap/kvproto.git" branch = "master" [[constraint]] @@ -40,6 +41,10 @@ name = "github.com/pkg/errors" version = ">= 0.8.0" +[[constraint]] + name = "github.com/pingcap/errcode" + version = ">= 0.1.0" + [[override]] name = "github.com/BurntSushi/toml" revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005" diff --git a/Makefile b/Makefile index 672386e1391..739694ed435 100644 --- a/Makefile +++ b/Makefile @@ -28,7 +28,7 @@ all: dev dev: build check test -ci: build check basic_test +ci: build check basic-test build: ifeq ("$(WITH_RACE)", "1") @@ -46,7 +46,7 @@ test: retool-setup CGO_ENABLED=1 go test -race -cover $(TEST_PKGS) || { $(GOFAIL_DISABLE); exit 1; } @$(GOFAIL_DISABLE) -basic_test: +basic-test: @$(GOFAIL_ENABLE) go test $(BASIC_TEST_PKGS) || { $(GOFAIL_DISABLE); exit 1; } @$(GOFAIL_DISABLE) @@ -103,6 +103,11 @@ endif simulator: CGO_ENABLED=0 go build -o bin/pd-simulator tools/pd-simulator/main.go +clean-test: + rm -rf /tmp/test_pd* + rm -rf /tmp/pd-integration-test* + rm -rf /tmp/test_etcd* + gofail-enable: # Converting gofail failpoints... @$(GOFAIL_ENABLE) diff --git a/docs/development.md b/docs/development.md index eda2905af1f..607946e057a 100644 --- a/docs/development.md +++ b/docs/development.md @@ -42,5 +42,5 @@ Consider that raml2html depends on various npm packages and can only be run unde ## Error responses -Error responses from the server are switching to using [error codes](../pkg/error_code/error_code.go). +Error responses from the server are switching to using [errcode codes](https://github.com/pingcap/errcode). The should use the `errorResp` function. Please look at other places in the codebase that use `errorResp`. diff --git a/pkg/error_code/error_code_test.go b/pkg/error_code/error_code_test.go deleted file mode 100644 index 7ea62dc03ed..00000000000 --- a/pkg/error_code/error_code_test.go +++ /dev/null @@ -1,345 +0,0 @@ -// Copyright 2018 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package errcode_test - -import ( - "errors" - "fmt" - "reflect" - "testing" - - "github.com/pingcap/pd/pkg/error_code" -) - -// Test setting the HTTP code -type HTTPError struct{} - -func (e HTTPError) Error() string { return "error" } - -const httpCodeStr = "input.http" - -var codeHttp900 = errcode.InvalidInputCode.Child(httpCodeStr).SetHTTP(900) - -func (e HTTPError) Code() errcode.Code { - return codeHttp900 -} - -func TestHttpErrorCode(t *testing.T) { - http := HTTPError{} - AssertHTTPCode(t, http, 900) - ErrorEquals(t, http, "error") - ClientDataEquals(t, http, http, httpCodeStr) -} - -// Test a very simple error -type MinimalError struct{} - -func (e MinimalError) Error() string { return "error" } - -var _ errcode.ErrorCode = (*MinimalError)(nil) // assert implements interface - -const codeString errcode.CodeStr = "input.testcode" - -var registeredCode errcode.Code = errcode.InvalidInputCode.Child(codeString) - -func (e MinimalError) Code() errcode.Code { return registeredCode } - -func TestMinimalErrorCode(t *testing.T) { - minimal := MinimalError{} - AssertCodes(t, minimal) - ErrorEquals(t, minimal, "error") - ClientDataEquals(t, minimal, minimal) - OpEquals(t, minimal, "") -} - -// We don't prevent duplicate codes -var childPathOnlyCode errcode.Code = errcode.InvalidInputCode.Child("testcode") - -type ChildOnlyError struct{} - -func (e ChildOnlyError) Error() string { return "error" } - -var _ errcode.ErrorCode = (*ChildOnlyError)(nil) // assert implements interface - -func (e ChildOnlyError) Code() errcode.Code { return childPathOnlyCode } - -func TestChildOnlyErrorCode(t *testing.T) { - coe := ChildOnlyError{} - AssertCodes(t, coe) - ErrorEquals(t, coe, "error") - ClientDataEquals(t, coe, coe) -} - -// Test a top-level error -type TopError struct{} - -func (e TopError) Error() string { return "error" } - -var _ errcode.ErrorCode = (*TopError)(nil) // assert implements interface - -const topCodeStr errcode.CodeStr = "top" - -var topCode errcode.Code = errcode.NewCode(topCodeStr) - -func (e TopError) Code() errcode.Code { return topCode } - -func TestTopErrorCode(t *testing.T) { - top := TopError{} - AssertCodes(t, top, topCodeStr) - ErrorEquals(t, top, "error") - ClientDataEquals(t, top, top, topCodeStr) -} - -// Test a deep hierarchy -type DeepError struct{} - -func (e DeepError) Error() string { return "error" } - -var _ errcode.ErrorCode = (*DeepError)(nil) // assert implements interface - -const deepCodeStr errcode.CodeStr = "input.testcode.very.very.deep" - -var intermediateCode = registeredCode.Child("input.testcode.very").SetHTTP(800) -var deepCode errcode.Code = intermediateCode.Child("input.testcode.very.very").Child(deepCodeStr) - -func (e DeepError) Code() errcode.Code { return deepCode } - -func TestDeepErrorCode(t *testing.T) { - deep := DeepError{} - AssertHTTPCode(t, deep, 800) - AssertCode(t, deep, deepCodeStr) - ErrorEquals(t, deep, "error") - ClientDataEquals(t, deep, deep, deepCodeStr) -} - -// Test an ErrorWrapper that has different error types placed into it -type ErrorWrapper struct{ Err error } - -var _ errcode.ErrorCode = (*ErrorWrapper)(nil) // assert implements interface -var _ errcode.HasClientData = (*ErrorWrapper)(nil) // assert implements interface - -func (e ErrorWrapper) Code() errcode.Code { - return registeredCode -} -func (e ErrorWrapper) Error() string { - return e.Err.Error() -} -func (e ErrorWrapper) GetClientData() interface{} { - return e.Err -} - -type Struct1 struct{ A string } -type StructConstError1 struct{ A string } - -func (e 
Struct1) Error() string { - return e.A -} - -func (e StructConstError1) Error() string { - return "error" -} - -type Struct2 struct { - A string - B string -} - -func (e Struct2) Error() string { - return fmt.Sprintf("error A & B %s & %s", e.A, e.B) -} - -func TestErrorWrapperCode(t *testing.T) { - wrapped := ErrorWrapper{Err: errors.New("error")} - AssertCodes(t, wrapped) - ErrorEquals(t, wrapped, "error") - ClientDataEquals(t, wrapped, errors.New("error")) - s2 := Struct2{A: "A", B: "B"} - wrappedS2 := ErrorWrapper{Err: s2} - AssertCodes(t, wrappedS2) - ErrorEquals(t, wrappedS2, "error A & B A & B") - ClientDataEquals(t, wrappedS2, s2) - s1 := Struct1{A: "A"} - ClientDataEquals(t, ErrorWrapper{Err: s1}, s1) - sconst := StructConstError1{A: "A"} - ClientDataEquals(t, ErrorWrapper{Err: sconst}, sconst) -} - -var internalChildCodeStr errcode.CodeStr = "internal.child.granchild" -var internalChild = errcode.InternalCode.Child("internal.child").SetHTTP(503).Child(internalChildCodeStr) - -type InternalChild struct{} - -func (ic InternalChild) Error() string { return "internal child error" } -func (ic InternalChild) Code() errcode.Code { return internalChild } - -func TestNewInvalidInputErr(t *testing.T) { - err := errcode.NewInvalidInputErr(errors.New("new error")) - AssertCodes(t, err, "input") - ErrorEquals(t, err, "new error") - ClientDataEquals(t, err, errors.New("new error"), "input") - - err = errcode.NewInvalidInputErr(MinimalError{}) - AssertCodes(t, err, "input.testcode") - ErrorEquals(t, err, "error") - ClientDataEquals(t, err, MinimalError{}, errcode.CodeStr("input.testcode")) - - internalErr := errcode.NewInternalErr(MinimalError{}) - err = errcode.NewInvalidInputErr(internalErr) - internalCodeStr := errcode.CodeStr("internal") - AssertCode(t, err, internalCodeStr) - AssertHTTPCode(t, err, 500) - ErrorEquals(t, err, "error") - ClientDataEquals(t, err, MinimalError{}, internalCodeStr) - - err = errcode.NewInvalidInputErr(InternalChild{}) - AssertCode(t, err, internalChildCodeStr) - AssertHTTPCode(t, err, 503) - ErrorEquals(t, err, "internal child error") - ClientDataEquals(t, err, InternalChild{}, internalChildCodeStr) -} - -func TestNewInternalErr(t *testing.T) { - internalCodeStr := errcode.CodeStr("internal") - err := errcode.NewInternalErr(errors.New("new error")) - AssertCode(t, err, internalCodeStr) - AssertHTTPCode(t, err, 500) - ErrorEquals(t, err, "new error") - ClientDataEquals(t, err, errors.New("new error"), "internal") - - err = errcode.NewInternalErr(MinimalError{}) - AssertCode(t, err, internalCodeStr) - AssertHTTPCode(t, err, 500) - ErrorEquals(t, err, "error") - ClientDataEquals(t, err, MinimalError{}, internalCodeStr) - - invalidErr := errcode.NewInvalidInputErr(MinimalError{}) - err = errcode.NewInternalErr(invalidErr) - AssertCode(t, err, internalCodeStr) - AssertHTTPCode(t, err, 500) - ErrorEquals(t, err, "error") - ClientDataEquals(t, err, MinimalError{}, internalCodeStr) -} - -// Test Operation -type OpErrorHas struct{ MinimalError } - -func (e OpErrorHas) GetOperation() string { return "has" } - -type OpErrorEmbed struct { - errcode.EmbedOp - MinimalError -} - -var _ errcode.ErrorCode = (*OpErrorHas)(nil) // assert implements interface -var _ errcode.HasOperation = (*OpErrorHas)(nil) // assert implements interface -var _ errcode.ErrorCode = (*OpErrorEmbed)(nil) // assert implements interface -var _ errcode.HasOperation = (*OpErrorEmbed)(nil) // assert implements interface - -func TestOpErrorCode(t *testing.T) { - AssertOperation(t, "foo", "") - has := 
OpErrorHas{} - AssertOperation(t, has, "has") - AssertCodes(t, has) - ErrorEquals(t, has, "error") - ClientDataEquals(t, has, has) - OpEquals(t, has, "has") - - OpEquals(t, OpErrorEmbed{}, "") - OpEquals(t, OpErrorEmbed{EmbedOp: errcode.EmbedOp{Op: "field"}}, "field") - - opEmpty := errcode.Op("") - op := errcode.Op("modify") - OpEquals(t, opEmpty.AddTo(MinimalError{}), "") - OpEquals(t, op.AddTo(MinimalError{}), "modify") - - OpEquals(t, ErrorWrapper{Err: has}, "has") - OpEquals(t, ErrorWrapper{Err: OpErrorEmbed{EmbedOp: errcode.EmbedOp{Op: "field"}}}, "field") - - opErrCode := errcode.OpErrCode{Operation: "opcode", Err: MinimalError{}} - AssertOperation(t, opErrCode, "opcode") - OpEquals(t, opErrCode, "opcode") - - OpEquals(t, ErrorWrapper{Err: opErrCode}, "opcode") - wrappedHas := ErrorWrapper{Err: errcode.OpErrCode{Operation: "opcode", Err: has}} - AssertOperation(t, wrappedHas, "") - OpEquals(t, wrappedHas, "opcode") - OpEquals(t, errcode.OpErrCode{Operation: "opcode", Err: has}, "opcode") -} - -func AssertCodes(t *testing.T, code errcode.ErrorCode, codeStrs ...errcode.CodeStr) { - t.Helper() - AssertCode(t, code, codeStrs...) - AssertHTTPCode(t, code, 400) -} - -func AssertCode(t *testing.T, code errcode.ErrorCode, codeStrs ...errcode.CodeStr) { - t.Helper() - codeStr := codeString - if len(codeStrs) > 0 { - codeStr = codeStrs[0] - } - if code.Code().CodeStr() != codeStr { - t.Errorf("code expected %v\ncode but got %v", codeStr, code.Code().CodeStr()) - } -} - -func AssertHTTPCode(t *testing.T, code errcode.ErrorCode, httpCode int) { - t.Helper() - expected := code.Code().HTTPCode() - if expected != httpCode { - t.Errorf("excpected HTTP Code %v but got %v", httpCode, expected) - } -} - -func ErrorEquals(t *testing.T, err error, msg string) { - if err.Error() != msg { - t.Errorf("Expected error %v. Got error %v", msg, err.Error()) - } -} - -func ClientDataEquals(t *testing.T, code errcode.ErrorCode, data interface{}, codeStrs ...errcode.CodeStr) { - codeStr := codeString - if len(codeStrs) > 0 { - codeStr = codeStrs[0] - } - t.Helper() - if !reflect.DeepEqual(errcode.ClientData(code), data) { - t.Errorf("\nClientData expected: %#v\n ClientData but got: %#v", data, errcode.ClientData(code)) - } - jsonExpected := errcode.JSONFormat{ - Data: data, - Msg: code.Error(), - Code: codeStr, - Operation: errcode.Operation(data), - } - if !reflect.DeepEqual(errcode.NewJSONFormat(code), jsonExpected) { - t.Errorf("\nJSON expected: %+v\n JSON but got: %+v", jsonExpected, errcode.NewJSONFormat(code)) - } -} - -func OpEquals(t *testing.T, code errcode.ErrorCode, op string) { - t.Helper() - opGot, _ := errcode.OperationClientData(code) - if opGot != op { - t.Errorf("\nOp expected: %#v\n Op but got: %#v", op, opGot) - } -} - -func AssertOperation(t *testing.T, v interface{}, op string) { - t.Helper() - opGot := errcode.Operation(v) - if opGot != op { - t.Errorf("\nOp expected: %#v\n Op but got: %#v", op, opGot) - } -} diff --git a/pkg/integration_test/cluster.go b/pkg/integration_test/cluster.go index 7d6354e8a3b..cfe9c057e1a 100644 --- a/pkg/integration_test/cluster.go +++ b/pkg/integration_test/cluster.go @@ -157,11 +157,14 @@ type testCluster struct { servers map[string]*testServer } -func newTestCluster(initialServerCount int) (*testCluster, error) { +// ConfigOption is used to define customize settings in test. 
+type ConfigOption func(conf *server.Config) + +func newTestCluster(initialServerCount int, opts ...ConfigOption) (*testCluster, error) { config := newClusterConfig(initialServerCount) servers := make(map[string]*testServer) for _, conf := range config.InitialServers { - serverConf, err := conf.Generate() + serverConf, err := conf.Generate(opts...) if err != nil { return nil, err } diff --git a/pkg/integration_test/config.go b/pkg/integration_test/config.go index c209f360511..0c900f17b11 100644 --- a/pkg/integration_test/config.go +++ b/pkg/integration_test/config.go @@ -45,7 +45,7 @@ func newServerConfig(name string, cc *clusterConfig, join bool) *serverConfig { } } -func (c *serverConfig) Generate() (*server.Config, error) { +func (c *serverConfig) Generate(opts ...ConfigOption) (*server.Config, error) { arguments := []string{ "--name=" + c.Name, "--data-dir=" + c.DataDir, @@ -65,6 +65,9 @@ func (c *serverConfig) Generate() (*server.Config, error) { if err != nil { return nil, err } + for _, opt := range opts { + opt(cfg) + } return cfg, nil } diff --git a/pkg/integration_test/region_syncer_test.go b/pkg/integration_test/region_syncer_test.go new file mode 100644 index 00000000000..9989bc66acb --- /dev/null +++ b/pkg/integration_test/region_syncer_test.go @@ -0,0 +1,77 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package integration + +import ( + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/kvproto/pkg/metapb" + + "github.com/pingcap/pd/server" + "github.com/pingcap/pd/server/core" +) + +type idAllocator struct { + id uint64 +} + +func (alloc *idAllocator) Alloc() uint64 { + alloc.id++ + return alloc.id +} + +func (s *integrationTestSuite) TestRegionSyncer(c *C) { + c.Parallel() + cluster, err := newTestCluster(3, func(conf *server.Config) { conf.PDServerCfg.EnableRegionStorage = true }) + c.Assert(err, IsNil) + defer cluster.Destroy() + + err = cluster.RunInitialServers() + c.Assert(err, IsNil) + cluster.WaitLeader() + leaderServer := cluster.GetServer(cluster.GetLeader()) + s.bootstrapCluster(leaderServer, c) + rc := leaderServer.server.GetRaftCluster() + c.Assert(rc, NotNil) + regionLen := 110 + id := &idAllocator{} + regions := make([]*core.RegionInfo, 0, regionLen) + for i := 0; i < regionLen; i++ { + r := &metapb.Region{ + Id: id.Alloc(), + RegionEpoch: &metapb.RegionEpoch{ + ConfVer: 1, + Version: 1, + }, + StartKey: []byte{byte(i)}, + EndKey: []byte{byte(i + 1)}, + Peers: []*metapb.Peer{{Id: id.Alloc(), StoreId: uint64(0)}}, + } + regions = append(regions, core.NewRegionInfo(r, r.Peers[0])) + } + for _, region := range regions { + err = rc.HandleRegionHeartbeat(region) + c.Assert(err, IsNil) + } + // ensure flush to region kv + time.Sleep(3 * time.Second) + err = leaderServer.Stop() + c.Assert(err, IsNil) + cluster.WaitLeader() + leaderServer = cluster.GetServer(cluster.GetLeader()) + c.Assert(leaderServer, NotNil) + loadRegions := leaderServer.server.GetRaftCluster().GetRegions() + c.Assert(len(loadRegions), Equals, regionLen) +} diff --git a/pkg/integration_test/version_upgrade_test.go b/pkg/integration_test/version_upgrade_test.go index 9b3de891bad..d1bc47e8ab7 100644 --- a/pkg/integration_test/version_upgrade_test.go +++ b/pkg/integration_test/version_upgrade_test.go @@ -26,7 +26,7 @@ func (s *integrationTestSuite) bootstrapCluster(server *testServer, c *C) { bootstrapReq := &pdpb.BootstrapRequest{ Header: &pdpb.RequestHeader{ClusterId: server.GetClusterID()}, Store: &metapb.Store{Id: 1, Address: "mock://1"}, - Region: &metapb.Region{Id: 2, Peers: []*metapb.Peer{{3, 1, false}}}, + Region: &metapb.Region{Id: 2, Peers: []*metapb.Peer{{Id: 3, StoreId: 1, IsLearner: false}}}, } _, err := server.server.Bootstrap(context.Background(), bootstrapReq) c.Assert(err, IsNil) diff --git a/server/api/api.raml b/server/api/api.raml index be4dc1752cb..e6a08756059 100644 --- a/server/api/api.raml +++ b/server/api/api.raml @@ -879,6 +879,24 @@ types: description: The input is invalid. 500: description: PD server failed to proceed the request. + /key/{key}: + uriParameters: + key: string + get: + description: List regions from a key. + queryParameters: + limit?: + type: integer + default: 16 + responses: + 200: + body: + application/json: + type: Regions + 400: + description: The input is invalid. + 500: + description: PD server failed to proceed the request. /check/{filter}: uriParameters: filter: @@ -1076,34 +1094,36 @@ types: 500: description: PD server failed to proceed the request. -/admin/cache/region/{id}: - uriParameters: - id: integer - delete: - description: Drop a specific region from cache. - responses: - 200: - description: The region is removed from server cache. - 400: - description: The input is invalid. - 500: - description: PD server failed to proceed the request. +/admin: + /cache/region/{id}: + uriParameters: + id: integer + delete: + description: Drop a specific region from cache. 
+ responses: + 200: + description: The region is removed from server cache. + 400: + description: The input is invalid. + 500: + description: PD server failed to proceed the request. + + /log: + description: The log level of PD server. + post: + description: Set log level. + body: + application/json: + type: string + enum: [ debug, info, warning, error, fatal ] + responses: + 200: + description: The log level is updated. + 400: + description: The input is invalid. + 500: + description: PD server failed to proceed the request. -/log: - description: The log level of PD server. - post: - description: Set log level. - body: - application/json: - type: string - enum: [ debug, info, warning, error, fatal ] - responses: - 200: - description: The log level is updated. - 400: - description: The input is invalid. - 500: - description: PD server failed to proceed the request. /classifier: description: The namespace classifier. Methods depend on current classifier. diff --git a/server/api/config.go b/server/api/config.go index d05b07042c1..31a686ef515 100644 --- a/server/api/config.go +++ b/server/api/config.go @@ -20,7 +20,7 @@ import ( "net/http" "github.com/gorilla/mux" - "github.com/pingcap/pd/pkg/error_code" + "github.com/pingcap/errcode" "github.com/pingcap/pd/server" "github.com/pkg/errors" "github.com/unrolled/render" diff --git a/server/api/diagnose.go b/server/api/diagnose.go index 9319659b0ff..3a79daede74 100644 --- a/server/api/diagnose.go +++ b/server/api/diagnose.go @@ -71,11 +71,11 @@ var ( memberLostPeers: {modMember, levelMajor, "some PD instances is down.", "please check host load and traffic."}, memberLostPeersMoreThanHalf: {modMember, levelCritical, "more than half PD instances is down.", "please check host load and traffic."}, memberLeaderChanged: {modMember, levelMinor, "PD cluster leader is changed.", "please check host load and traffic."}, - tikvCap70: {modTiKV, levelWarning, "some TiKV stroage used more than 70%.", "plase add TiKV node."}, - tikvCap80: {modTiKV, levelMinor, "some TiKV stroage used more than 80%.", "plase add TiKV node."}, - tikvCap90: {modTiKV, levelMajor, "some TiKV stroage used more than 90%.", "plase add TiKV node."}, - tikvLostPeers: {modTiKV, levelWarning, "some TiKV lost connect.", "plase check network."}, - tikvLostPeersLongTime: {modTiKV, levelMajor, "some TiKV lost connect more than 1h.", "plase check network."}, + tikvCap70: {modTiKV, levelWarning, "some TiKV storage used more than 70%.", "please add TiKV node."}, + tikvCap80: {modTiKV, levelMinor, "some TiKV storage used more than 80%.", "please add TiKV node."}, + tikvCap90: {modTiKV, levelMajor, "some TiKV storage used more than 90%.", "please add TiKV node."}, + tikvLostPeers: {modTiKV, levelWarning, "some TiKV lost connect.", "please check network."}, + tikvLostPeersLongTime: {modTiKV, levelMajor, "some TiKV lost connect more than 1h.", "please check network."}, } ) diff --git a/server/api/region.go b/server/api/region.go index 7731c0860fe..cfe7d57a2bf 100644 --- a/server/api/region.go +++ b/server/api/region.go @@ -142,6 +142,40 @@ func (h *regionsHandler) GetAll(w http.ResponseWriter, r *http.Request) { h.rd.JSON(w, http.StatusOK, regionsInfo) } +func (h *regionsHandler) ScanRegionsByKey(w http.ResponseWriter, r *http.Request) { + cluster := h.svr.GetRaftCluster() + if cluster == nil { + h.rd.JSON(w, http.StatusInternalServerError, server.ErrNotBootstrapped.Error()) + return + } + vars := mux.Vars(r) + startKey := vars["key"] + + limit := defaultRegionLimit + if limitStr := 
r.URL.Query().Get("limit"); limitStr != "" { + var err error + limit, err = strconv.Atoi(limitStr) + if err != nil { + h.rd.JSON(w, http.StatusBadRequest, err.Error()) + return + } + } + if limit > maxRegionLimit { + limit = maxRegionLimit + } + regions := cluster.ScanRegionsByKey([]byte(startKey), limit) + + regionInfos := make([]*regionInfo, 0, len(regions)) + for _, region := range regions { + regionInfos = append(regionInfos, newRegionInfo(region)) + } + regionsInfo := ®ionsInfo{ + Count: len(regionInfos), + Regions: regionInfos, + } + h.rd.JSON(w, http.StatusOK, regionsInfo) +} + func (h *regionsHandler) GetStoreRegions(w http.ResponseWriter, r *http.Request) { cluster := h.svr.GetRaftCluster() if cluster == nil { diff --git a/server/api/region_test.go b/server/api/region_test.go index d468b2d0b01..38b72dafa0e 100644 --- a/server/api/region_test.go +++ b/server/api/region_test.go @@ -109,6 +109,25 @@ func (s *testRegionSuite) TestRegions(c *C) { } } +func (s *testRegionSuite) TestScanRegionByKey(c *C) { + r1 := newTestRegionInfo(2, 1, []byte("a"), []byte("b")) + r2 := newTestRegionInfo(3, 1, []byte("b"), []byte("c")) + r3 := newTestRegionInfo(4, 2, []byte("c"), []byte("d")) + mustRegionHeartbeat(c, s.svr, r1) + mustRegionHeartbeat(c, s.svr, r2) + mustRegionHeartbeat(c, s.svr, r3) + + url := fmt.Sprintf("%s/regions/key/%s", s.urlPrefix, "b") + regionIds := []uint64{3, 4} + regions := ®ionsInfo{} + err := readJSONWithURL(url, regions) + c.Assert(err, IsNil) + c.Assert(len(regionIds), Equals, regions.Count) + for i, v := range regionIds { + c.Assert(v, Equals, regions.Regions[i].ID) + } +} + func (s *testRegionSuite) TestStoreRegions(c *C) { r1 := newTestRegionInfo(2, 1, []byte("a"), []byte("b")) r2 := newTestRegionInfo(3, 1, []byte("b"), []byte("c")) diff --git a/server/api/router.go b/server/api/router.go index 525c1f723b3..95bbbbd4254 100644 --- a/server/api/router.go +++ b/server/api/router.go @@ -84,6 +84,7 @@ func createRouter(prefix string, svr *server.Server) *mux.Router { regionsHandler := newRegionsHandler(svr, rd) router.HandleFunc("/api/v1/regions", regionsHandler.GetAll).Methods("GET") + router.HandleFunc("/api/v1/regions/key/{key}", regionsHandler.ScanRegionsByKey).Methods("GET") router.HandleFunc("/api/v1/regions/store/{id}", regionsHandler.GetStoreRegions).Methods("GET") router.HandleFunc("/api/v1/regions/writeflow", regionsHandler.GetTopWriteFlow).Methods("GET") router.HandleFunc("/api/v1/regions/readflow", regionsHandler.GetTopReadFlow).Methods("GET") @@ -125,7 +126,7 @@ func createRouter(prefix string, svr *server.Server) *mux.Router { router.HandleFunc("/api/v1/admin/cache/region/{id}", adminHandler.HandleDropCacheRegion).Methods("DELETE") logHanler := newlogHandler(svr, rd) - router.HandleFunc("/api/v1/log", logHanler.Handle).Methods("POST") + router.HandleFunc("/api/v1/admin/log", logHanler.Handle).Methods("POST") router.HandleFunc(pingAPI, func(w http.ResponseWriter, r *http.Request) {}).Methods("GET") router.Handle("/health", newHealthHandler(svr, rd)).Methods("GET") diff --git a/server/api/store.go b/server/api/store.go index 891cc8f7801..267088c3bb8 100644 --- a/server/api/store.go +++ b/server/api/store.go @@ -20,9 +20,9 @@ import ( "time" "github.com/gorilla/mux" + "github.com/pingcap/errcode" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/pd/pkg/apiutil" - "github.com/pingcap/pd/pkg/error_code" "github.com/pingcap/pd/pkg/typeutil" "github.com/pingcap/pd/server" "github.com/pingcap/pd/server/core" diff --git a/server/api/util.go 
b/server/api/util.go index c94ab46cdac..d8b5ae5fb6a 100644 --- a/server/api/util.go +++ b/server/api/util.go @@ -20,8 +20,8 @@ import ( "io/ioutil" "net/http" + "github.com/pingcap/errcode" "github.com/pingcap/pd/pkg/apiutil" - "github.com/pingcap/pd/pkg/error_code" "github.com/pingcap/pd/server" "github.com/pkg/errors" log "github.com/sirupsen/logrus" @@ -40,7 +40,7 @@ func errorResp(rd *render.Render, w http.ResponseWriter, err error) { rd.JSON(w, http.StatusInternalServerError, "nil error") return } - if errCode, ok := errors.Cause(err).(errcode.ErrorCode); ok { + if errCode := errcode.CodeChain(err); errCode != nil { w.Header().Set("TiDB-Error-Code", errCode.Code().CodeStr().String()) rd.JSON(w, errCode.Code().HTTPCode(), errcode.NewJSONFormat(errCode)) } else { diff --git a/server/cluster.go b/server/cluster.go index f4e849391d8..d3b3a0ad4c5 100644 --- a/server/cluster.go +++ b/server/cluster.go @@ -19,9 +19,9 @@ import ( "sync" "time" + "github.com/pingcap/errcode" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" - "github.com/pingcap/pd/pkg/error_code" "github.com/pingcap/pd/pkg/logutil" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" @@ -55,8 +55,9 @@ type RaftCluster struct { coordinator *coordinator - wg sync.WaitGroup - quit chan struct{} + wg sync.WaitGroup + quit chan struct{} + regionSyncer *regionSyncer } // ClusterStatus saves some state information @@ -66,10 +67,11 @@ type ClusterStatus struct { func newRaftCluster(s *Server, clusterID uint64) *RaftCluster { return &RaftCluster{ - s: s, - running: false, - clusterID: clusterID, - clusterRoot: s.getClusterRootPath(), + s: s, + running: false, + clusterID: clusterID, + clusterRoot: s.getClusterRootPath(), + regionSyncer: newRegionSyncer(s), } } @@ -115,15 +117,44 @@ func (c *RaftCluster) start() error { c.cachedCluster.regionStats = newRegionStatistics(c.s.scheduleOpt, c.s.classifier) c.quit = make(chan struct{}) - c.wg.Add(2) + c.wg.Add(3) go c.runCoordinator() go c.runBackgroundJobs(backgroundJobInterval) - + go c.runSyncRegions() c.running = true return nil } +func (c *RaftCluster) runSyncRegions() { + defer logutil.LogPanic() + defer c.wg.Done() + var requests []*metapb.Region + ticker := time.NewTicker(syncerKeepAliveInterval) + for { + select { + case <-c.quit: + return + case first := <-c.cachedCluster.getChangedRegions(): + requests = append(requests, first.GetMeta()) + pending := len(c.cachedCluster.getChangedRegions()) + for i := 0; i < pending && i < maxSyncRegionBatchSize; i++ { + region := <-c.cachedCluster.getChangedRegions() + requests = append(requests, region.GetMeta()) + } + regions := &pdpb.SyncRegionResponse{ + Header: &pdpb.ResponseHeader{ClusterId: c.s.clusterID}, + Regions: requests, + } + c.regionSyncer.broadcast(regions) + case <-ticker.C: + alive := &pdpb.SyncRegionResponse{Header: &pdpb.ResponseHeader{ClusterId: c.s.clusterID}} + c.regionSyncer.broadcast(alive) + } + requests = requests[:0] + } +} + func (c *RaftCluster) runCoordinator() { defer logutil.LogPanic() @@ -228,6 +259,11 @@ func (c *RaftCluster) GetRegionInfoByKey(regionKey []byte) *core.RegionInfo { return c.cachedCluster.searchRegion(regionKey) } +// ScanRegionsByKey scans region with start key, until number greater than limit. +func (c *RaftCluster) ScanRegionsByKey(startKey []byte, limit int) []*core.RegionInfo { + return c.cachedCluster.ScanRegions(startKey, limit) +} + // GetRegionByID gets region and leader peer by regionID from cluster. 
func (c *RaftCluster) GetRegionByID(regionID uint64) (*metapb.Region, *metapb.Peer) { region := c.cachedCluster.GetRegion(regionID) diff --git a/server/cluster_info.go b/server/cluster_info.go index 2d29934180d..592fed1ce35 100644 --- a/server/cluster_info.go +++ b/server/cluster_info.go @@ -38,8 +38,11 @@ type clusterInfo struct { regionStats *regionStatistics labelLevelStats *labelLevelStatistics prepareChecker *prepareChecker + changedRegions chan *core.RegionInfo } +var defaultChangedRegionsLimit = 10000 + func newClusterInfo(id core.IDAllocator, opt *scheduleOption, kv *core.KV) *clusterInfo { return &clusterInfo{ core: schedule.NewBasicCluster(), @@ -48,6 +51,7 @@ func newClusterInfo(id core.IDAllocator, opt *scheduleOption, kv *core.KV) *clus kv: kv, labelLevelStats: newLabelLevelStatistics(), prepareChecker: newPrepareChecker(), + changedRegions: make(chan *core.RegionInfo, defaultChangedRegionsLimit), } } @@ -108,6 +112,10 @@ func (c *clusterInfo) OnStoreVersionChange() { } } +func (c *clusterInfo) getChangedRegions() <-chan *core.RegionInfo { + return c.changedRegions +} + // IsFeatureSupported checks if the feature is supported by current cluster. func (c *clusterInfo) IsFeatureSupported(f Feature) bool { clusterVersion := c.opt.loadClusterVersion() @@ -511,6 +519,10 @@ func (c *clusterInfo) handleRegionHeartbeat(region *core.RegionInfo) error { // after restart. Here we only log the error then go on updating cache. log.Errorf("[region %d] fail to save region %v: %v", region.GetID(), region, err) } + select { + case c.changedRegions <- region: + default: + } } if !isWriteUpdate && !isReadUpdate && !saveCache && !isNew { return nil diff --git a/server/cluster_info_test.go b/server/cluster_info_test.go index e12fade731c..eddef610022 100644 --- a/server/cluster_info_test.go +++ b/server/cluster_info_test.go @@ -700,6 +700,7 @@ func mustSaveRegions(c *C, kv *core.KV, n int) []*metapb.Region { for _, region := range regions { c.Assert(kv.SaveRegion(region), IsNil) } + c.Assert(kv.Flush(), IsNil) return regions } diff --git a/server/config.go b/server/config.go index b36d966fe26..0f9eabcf42f 100644 --- a/server/config.go +++ b/server/config.go @@ -78,6 +78,8 @@ type Config struct { Namespace map[string]NamespaceConfig `json:"namespace"` + PDServerCfg PDServerConfig `toml:"pd-server" json:"pd-server"` + ClusterVersion semver.Version `json:"cluster-version"` // QuotaBackendBytes Raise alarms when backend size exceeds the given quota. 0 means use the default quota. @@ -634,6 +636,12 @@ func (s SecurityConfig) ToTLSConfig() (*tls.Config, error) { return tlsConfig, nil } +// PDServerConfig is the configuration for pd server. +type PDServerConfig struct { + // EnableRegionStorage enables the independent region storage. + EnableRegionStorage bool `toml:"enable-region-storage" json:"enable-region-storage"` +} + // StoreLabel is the config item of LabelPropertyConfig. 
type StoreLabel struct { Key string `toml:"key" json:"key"` diff --git a/server/core/errors.go b/server/core/errors.go index 30de0dba901..a58b9adcc53 100644 --- a/server/core/errors.go +++ b/server/core/errors.go @@ -20,7 +20,7 @@ import ( "fmt" "net/http" - "github.com/pingcap/pd/pkg/error_code" + "github.com/pingcap/errcode" ) var ( diff --git a/server/core/kv.go b/server/core/kv.go index 58c61842bb3..3eee3c67510 100644 --- a/server/core/kv.go +++ b/server/core/kv.go @@ -19,6 +19,7 @@ import ( "math" "path" "strconv" + "sync/atomic" "github.com/gogo/protobuf/proto" "github.com/pingcap/kvproto/pkg/metapb" @@ -40,6 +41,8 @@ const ( // KV wraps all kv operations, keep it stateless. type KV struct { KVBase + regionKV *RegionKV + useRegionKV int32 } // NewKV creates KV instance with KVBase. @@ -49,11 +52,27 @@ func NewKV(base KVBase) *KV { } } +// SetRegionKV sets the region storage. +func (kv *KV) SetRegionKV(regionKV *RegionKV) *KV { + kv.regionKV = regionKV + return kv +} + +// SwitchToRegionStorage switches to the region storage. +func (kv *KV) SwitchToRegionStorage() { + atomic.StoreInt32(&kv.useRegionKV, 1) +} + +// SwitchToDefaultStorage switches to the default storage. +func (kv *KV) SwitchToDefaultStorage() { + atomic.StoreInt32(&kv.useRegionKV, 0) +} + func (kv *KV) storePath(storeID uint64) string { return path.Join(clusterPath, "s", fmt.Sprintf("%020d", storeID)) } -func (kv *KV) regionPath(regionID uint64) string { +func regionPath(regionID uint64) string { return path.Join(clusterPath, "r", fmt.Sprintf("%020d", regionID)) } @@ -72,37 +91,54 @@ func (kv *KV) storeRegionWeightPath(storeID uint64) string { // LoadMeta loads cluster meta from KV store. func (kv *KV) LoadMeta(meta *metapb.Cluster) (bool, error) { - return kv.loadProto(clusterPath, meta) + return loadProto(kv.KVBase, clusterPath, meta) } // SaveMeta saves cluster meta to KV store. func (kv *KV) SaveMeta(meta *metapb.Cluster) error { - return kv.saveProto(clusterPath, meta) + return saveProto(kv.KVBase, clusterPath, meta) } // LoadStore loads one store from KV. func (kv *KV) LoadStore(storeID uint64, store *metapb.Store) (bool, error) { - return kv.loadProto(kv.storePath(storeID), store) + return loadProto(kv.KVBase, kv.storePath(storeID), store) } // SaveStore saves one store to KV. func (kv *KV) SaveStore(store *metapb.Store) error { - return kv.saveProto(kv.storePath(store.GetId()), store) + return saveProto(kv.KVBase, kv.storePath(store.GetId()), store) } // LoadRegion loads one region from KV. func (kv *KV) LoadRegion(regionID uint64, region *metapb.Region) (bool, error) { - return kv.loadProto(kv.regionPath(regionID), region) + if atomic.LoadInt32(&kv.useRegionKV) > 0 { + return loadProto(kv.regionKV, regionPath(regionID), region) + } + return loadProto(kv.KVBase, regionPath(regionID), region) +} + +// LoadRegions loads all regions from KV to RegionsInfo. +func (kv *KV) LoadRegions(regions *RegionsInfo) error { + if atomic.LoadInt32(&kv.useRegionKV) > 0 { + return loadRegions(kv.regionKV, regions) + } + return loadRegions(kv.KVBase, regions) } // SaveRegion saves one region to KV. func (kv *KV) SaveRegion(region *metapb.Region) error { - return kv.saveProto(kv.regionPath(region.GetId()), region) + if atomic.LoadInt32(&kv.useRegionKV) > 0 { + return kv.regionKV.SaveRegion(region) + } + return saveProto(kv.KVBase, regionPath(region.GetId()), region) } // DeleteRegion deletes one region from KV. 
func (kv *KV) DeleteRegion(region *metapb.Region) error { - return kv.Delete(kv.regionPath(region.GetId())) + if atomic.LoadInt32(&kv.useRegionKV) > 0 { + return deleteRegion(kv.regionKV, region) + } + return deleteRegion(kv.KVBase, region) } // SaveConfig stores marshalable cfg to the configPath. @@ -191,45 +227,20 @@ func (kv *KV) loadFloatWithDefaultValue(path string, def float64) (float64, erro return val, nil } -// LoadRegions loads all regions from KV to RegionsInfo. -func (kv *KV) LoadRegions(regions *RegionsInfo) error { - nextID := uint64(0) - endKey := kv.regionPath(math.MaxUint64) - - // Since the region key may be very long, using a larger rangeLimit will cause - // the message packet to exceed the grpc message size limit (4MB). Here we use - // a variable rangeLimit to work around. - rangeLimit := maxKVRangeLimit - - for { - key := kv.regionPath(nextID) - res, err := kv.LoadRange(key, endKey, rangeLimit) - if err != nil { - if rangeLimit /= 2; rangeLimit >= minKVRangeLimit { - continue - } - return err - } - - for _, s := range res { - region := &metapb.Region{} - if err := region.Unmarshal([]byte(s)); err != nil { - return errors.WithStack(err) - } - - nextID = region.GetId() + 1 - overlaps := regions.SetRegion(NewRegionInfo(region, nil)) - for _, item := range overlaps { - if err := kv.DeleteRegion(item); err != nil { - return err - } - } - } +// Flush flushes the dirty region to storage. +func (kv *KV) Flush() error { + if kv.regionKV != nil { + kv.regionKV.FlushRegion() + } + return nil +} - if len(res) < rangeLimit { - return nil - } +// Close closes the kv. +func (kv *KV) Close() error { + if kv.regionKV != nil { + return kv.regionKV.Close() } + return nil } // SaveGCSafePoint saves new GC safe point to KV. @@ -256,7 +267,7 @@ func (kv *KV) LoadGCSafePoint() (uint64, error) { return safePoint, nil } -func (kv *KV) loadProto(key string, msg proto.Message) (bool, error) { +func loadProto(kv KVBase, key string, msg proto.Message) (bool, error) { value, err := kv.Load(key) if err != nil { return false, err @@ -268,7 +279,7 @@ func (kv *KV) loadProto(key string, msg proto.Message) (bool, error) { return true, errors.WithStack(err) } -func (kv *KV) saveProto(key string, msg proto.Message) error { +func saveProto(kv KVBase, key string, msg proto.Message) error { value, err := proto.Marshal(msg) if err != nil { return errors.WithStack(err) diff --git a/server/core/kv_test.go b/server/core/kv_test.go index 8e88120f1c3..d65e716e4b9 100644 --- a/server/core/kv_test.go +++ b/server/core/kv_test.go @@ -31,7 +31,7 @@ func (s *testKVSuite) TestBasic(c *C) { kv := NewKV(NewMemoryKV()) c.Assert(kv.storePath(123), Equals, "raft/s/00000000000000000123") - c.Assert(kv.regionPath(123), Equals, "raft/r/00000000000000000123") + c.Assert(regionPath(123), Equals, "raft/r/00000000000000000123") meta := &metapb.Cluster{Id: 123} ok, err := kv.LoadMeta(meta) diff --git a/server/core/levedb_kv.go b/server/core/levedb_kv.go new file mode 100644 index 00000000000..b0ae0f85939 --- /dev/null +++ b/server/core/levedb_kv.go @@ -0,0 +1,83 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "github.com/gogo/protobuf/proto" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pkg/errors" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/util" +) + +type leveldbKV struct { + db *leveldb.DB +} + +// newLeveldbKV opens a leveldb KV that is used to store region information. +func newLeveldbKV(path string) (*leveldbKV, error) { + db, err := leveldb.OpenFile(path, nil) + if err != nil { + return nil, errors.WithStack(err) + } + return &leveldbKV{db: db}, nil +} + +func (kv *leveldbKV) Load(key string) (string, error) { + v, err := kv.db.Get([]byte(key), nil) + if err != nil { + return "", errors.WithStack(err) + } + return string(v), err +} + +func (kv *leveldbKV) LoadRange(startKey, endKey string, limit int) ([]string, error) { + iter := kv.db.NewIterator(&util.Range{Start: []byte(startKey), Limit: []byte(endKey)}, nil) + values := make([]string, 0, limit) + count := 0 + for iter.Next() { + if count >= limit { + break + } + values = append(values, string(iter.Value())) + count++ + } + iter.Release() + return values, nil +} + +func (kv *leveldbKV) Save(key, value string) error { + return errors.WithStack(kv.db.Put([]byte(key), []byte(value), nil)) +} + +func (kv *leveldbKV) Delete(key string) error { + return errors.WithStack(kv.db.Delete([]byte(key), nil)) +} + +func (kv *leveldbKV) SaveRegions(regions map[string]*metapb.Region) error { + batch := new(leveldb.Batch) + + for key, r := range regions { + value, err := proto.Marshal(r) + if err != nil { + return errors.WithStack(err) + } + batch.Put([]byte(key), value) + } + return errors.WithStack(kv.db.Write(batch, nil)) +} + +func (kv *leveldbKV) Close() error { + return errors.WithStack(kv.db.Close()) +} diff --git a/server/core/region_kv.go b/server/core/region_kv.go new file mode 100644 index 00000000000..be1ad86a80a --- /dev/null +++ b/server/core/region_kv.go @@ -0,0 +1,183 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + "math" + "sync" + "time" + + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +var dirtyFlushTick = time.Second + +// RegionKV is used to save regions. +type RegionKV struct { + *leveldbKV + mu sync.RWMutex + batchRegions map[string]*metapb.Region + batchSize int + cacheSize int + flushRate time.Duration + flushTime time.Time + ctx context.Context + cancel context.CancelFunc +} + +const ( + // defaultFlushRegionRate is the interval to sync the regions to kv storage. + defaultFlushRegionRate = 3 * time.Second + // defaultBatchSize is the batch size to save the regions to kv storage. 
+ defaultBatchSize = 100 +) + +// NewRegionKV returns a kv storage that is used to save regions. +func NewRegionKV(path string) (*RegionKV, error) { + levelDB, err := newLeveldbKV(path) + if err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(context.Background()) + kv := &RegionKV{ + leveldbKV: levelDB, + batchSize: defaultBatchSize, + flushRate: defaultFlushRegionRate, + batchRegions: make(map[string]*metapb.Region, defaultBatchSize), + flushTime: time.Now().Add(defaultFlushRegionRate), + ctx: ctx, + cancel: cancel, + } + kv.backgroundFlush() + return kv, nil +} + +func (kv *RegionKV) backgroundFlush() { + ticker := time.NewTicker(dirtyFlushTick) + var ( + isFlush bool + err error + ) + go func() { + defer ticker.Stop() + for { + select { + case <-ticker.C: + kv.mu.RLock() + isFlush = kv.flushTime.Before(time.Now()) + kv.mu.RUnlock() + if !isFlush { + continue + } + if err = kv.FlushRegion(); err != nil { + log.Error("flush regions error: ", err) + } + case <-kv.ctx.Done(): + return + } + } + }() +} + +// SaveRegion saves one region to KV. +func (kv *RegionKV) SaveRegion(region *metapb.Region) error { + kv.mu.Lock() + defer kv.mu.Unlock() + if kv.cacheSize < kv.batchSize-1 { + kv.batchRegions[regionPath(region.GetId())] = region + kv.cacheSize++ + + kv.flushTime = time.Now().Add(kv.flushRate) + return nil + } + kv.batchRegions[regionPath(region.GetId())] = region + err := kv.flush() + + if err != nil { + return err + } + return nil +} + +func deleteRegion(kv KVBase, region *metapb.Region) error { + return kv.Delete(regionPath(region.GetId())) +} + +func loadRegions(kv KVBase, regions *RegionsInfo) error { + nextID := uint64(0) + endKey := regionPath(math.MaxUint64) + + // Since the region key may be very long, using a larger rangeLimit will cause + // the message packet to exceed the grpc message size limit (4MB). Here we use + // a variable rangeLimit to work around. + rangeLimit := maxKVRangeLimit + for { + startKey := regionPath(nextID) + res, err := kv.LoadRange(startKey, endKey, rangeLimit) + if err != nil { + if rangeLimit /= 2; rangeLimit >= minKVRangeLimit { + continue + } + return err + } + + for _, s := range res { + region := &metapb.Region{} + if err := region.Unmarshal([]byte(s)); err != nil { + return errors.WithStack(err) + } + + nextID = region.GetId() + 1 + overlaps := regions.SetRegion(NewRegionInfo(region, nil)) + for _, item := range overlaps { + if err := deleteRegion(kv, item); err != nil { + return err + } + } + } + + if len(res) < rangeLimit { + return nil + } + } +} + +// FlushRegion saves the cache region to region kv storage. +func (kv *RegionKV) FlushRegion() error { + kv.mu.Lock() + defer kv.mu.Unlock() + return kv.flush() +} + +func (kv *RegionKV) flush() error { + if err := kv.SaveRegions(kv.batchRegions); err != nil { + return err + } + kv.cacheSize = 0 + kv.batchRegions = make(map[string]*metapb.Region, kv.batchSize) + return nil +} + +// Close closes the kv. 
+func (kv *RegionKV) Close() error { + err := kv.FlushRegion() + if err != nil { + log.Error("meet error before close the region storage: ", err) + } + kv.cancel() + return kv.db.Close() +} diff --git a/server/core/store.go b/server/core/store.go index e619201e881..767f6e8c521 100644 --- a/server/core/store.go +++ b/server/core/store.go @@ -21,9 +21,9 @@ import ( "time" "github.com/gogo/protobuf/proto" + "github.com/pingcap/errcode" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" - "github.com/pingcap/pd/pkg/error_code" log "github.com/sirupsen/logrus" ) diff --git a/server/grpc_service.go b/server/grpc_service.go index 5bf55e505f2..ef08b785f2f 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -610,6 +610,26 @@ func (s *Server) GetGCSafePoint(ctx context.Context, request *pdpb.GetGCSafePoin }, nil } +// SyncRegions syncs the regions. +func (s *Server) SyncRegions(stream pdpb.PD_SyncRegionsServer) error { + for { + request, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return errors.WithStack(err) + } + if request.GetHeader().GetClusterId() != s.clusterID { + return status.Errorf(codes.FailedPrecondition, "mismatch cluster id, need %d but got %d", s.clusterID, request.GetHeader().GetClusterId()) + } + log.Infof("establish sync region stream with %s [%s]", request.GetMember().GetName(), request.GetMember().GetClientUrls()[0]) + if s.cluster.regionSyncer != nil { + s.cluster.regionSyncer.bindStream(request.GetMember().GetName(), stream) + } + } +} + // UpdateGCSafePoint implements gRPC PDServer. func (s *Server) UpdateGCSafePoint(ctx context.Context, request *pdpb.UpdateGCSafePointRequest) (*pdpb.UpdateGCSafePointResponse, error) { if err := s.validateRequest(request.GetHeader()); err != nil { diff --git a/server/heartbeat_stream_test.go b/server/heartbeat_stream_test.go index 7d0746ec4a2..f1a5c4bd93e 100644 --- a/server/heartbeat_stream_test.go +++ b/server/heartbeat_stream_test.go @@ -54,7 +54,8 @@ func (s *testHeartbeatStreamSuite) TestActivity(c *C) { storeID, err := s.svr.idAlloc.Alloc() c.Assert(err, IsNil) putStore(c, s.grpcPDClient, s.svr.clusterID, &metapb.Store{Id: storeID, Address: "127.0.0.1:1"}) - newHandler(s.svr).AddAddPeerOperator(s.region.GetId(), storeID) + err = newHandler(s.svr).AddAddPeerOperator(s.region.GetId(), storeID) + c.Assert(err, IsNil) stream1, stream2 := newRegionheartbeatClient(c, s.grpcPDClient), newRegionheartbeatClient(c, s.grpcPDClient) defer stream1.close() diff --git a/server/leader.go b/server/leader.go index 12aff713aa8..e92bd02ee16 100644 --- a/server/leader.go +++ b/server/leader.go @@ -238,7 +238,7 @@ func (s *Server) campaignLeader() error { } log.Debugf("campaign leader ok %s", s.Name()) - err = s.scheduleOpt.reload(s.kv) + err = s.reloadConfigFromKV() if err != nil { return err } @@ -298,6 +298,15 @@ func (s *Server) watchLeader(leader *pdpb.Member) { ctx, cancel := context.WithCancel(s.serverLoopCtx) defer cancel() + err := s.reloadConfigFromKV() + if err != nil { + log.Error("reload config failed:", err) + return + } + if s.scheduleOpt.loadPDServerConfig().EnableRegionStorage { + s.cluster.regionSyncer.startSyncWithLeader(leader.GetClientUrls()[0]) + defer s.cluster.regionSyncer.stopSyncWithLeader() + } for { rch := watcher.Watch(ctx, s.getLeaderPath()) @@ -364,3 +373,18 @@ func (s *Server) deleteLeaderKey() error { func (s *Server) leaderCmp() clientv3.Cmp { return clientv3.Compare(clientv3.Value(s.getLeaderPath()), "=", s.memberValue) } + +func (s *Server) 
reloadConfigFromKV() error { + err := s.scheduleOpt.reload(s.kv) + if err != nil { + return err + } + if s.scheduleOpt.loadPDServerConfig().EnableRegionStorage { + s.kv.SwitchToRegionStorage() + log.Info("server enable region storage") + } else { + s.kv.SwitchToDefaultStorage() + log.Info("server disable region storage") + } + return nil +} diff --git a/server/option.go b/server/option.go index b14fe566cc4..9b9148e22ee 100644 --- a/server/option.go +++ b/server/option.go @@ -31,6 +31,7 @@ type scheduleOption struct { ns map[string]*namespaceOption labelProperty atomic.Value clusterVersion atomic.Value + pdServerConfig atomic.Value } func newScheduleOption(cfg *Config) *scheduleOption { @@ -42,6 +43,7 @@ func newScheduleOption(cfg *Config) *scheduleOption { o.ns[name] = newNamespaceOption(&nsCfg) } o.rep = newReplication(&cfg.Replication) + o.pdServerConfig.Store(&cfg.PDServerCfg) o.labelProperty.Store(cfg.LabelProperty) o.clusterVersion.Store(cfg.ClusterVersion) return o @@ -259,6 +261,10 @@ func (o *scheduleOption) loadClusterVersion() semver.Version { return o.clusterVersion.Load().(semver.Version) } +func (o *scheduleOption) loadPDServerConfig() *PDServerConfig { + return o.pdServerConfig.Load().(*PDServerConfig) +} + func (o *scheduleOption) persist(kv *core.KV) error { namespaces := make(map[string]NamespaceConfig) for name, ns := range o.ns { @@ -270,6 +276,7 @@ func (o *scheduleOption) persist(kv *core.KV) error { Namespace: namespaces, LabelProperty: o.loadLabelPropertyConfig(), ClusterVersion: o.loadClusterVersion(), + PDServerCfg: *o.loadPDServerConfig(), } err := kv.SaveConfig(cfg) return err @@ -286,6 +293,7 @@ func (o *scheduleOption) reload(kv *core.KV) error { Namespace: namespaces, LabelProperty: o.loadLabelPropertyConfig().clone(), ClusterVersion: o.loadClusterVersion(), + PDServerCfg: *o.loadPDServerConfig(), } isExist, err := kv.LoadConfig(cfg) if err != nil { @@ -301,6 +309,7 @@ func (o *scheduleOption) reload(kv *core.KV) error { } o.labelProperty.Store(cfg.LabelProperty) o.clusterVersion.Store(cfg.ClusterVersion) + o.pdServerConfig.Store(&cfg.PDServerCfg) } return nil } diff --git a/server/placement/constraint.go b/server/placement/constraint.go index 7682df4ea3a..6aabc468dbe 100644 --- a/server/placement/constraint.go +++ b/server/placement/constraint.go @@ -13,6 +13,8 @@ package placement +import "github.com/pingcap/pd/server/core" + // Config is consist of a list of constraints. type Config struct { Constraints []*Constraint @@ -36,3 +38,11 @@ type Filter struct { } var functionList = []string{"count", "label_values", "count_leader", "isolation_level"} + +// Cluster provides an overview of a cluster's region distribution. +type Cluster interface { + GetRegion(id uint64) *core.RegionInfo + GetStores() []*core.StoreInfo + GetStore(id uint64) *core.StoreInfo + GetRegionStores(id uint64) []*core.StoreInfo +} diff --git a/server/placement/functions.go b/server/placement/functions.go new file mode 100644 index 00000000000..7fbf5212709 --- /dev/null +++ b/server/placement/functions.go @@ -0,0 +1,119 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package placement + +import ( + "math" + + "github.com/pingcap/pd/server/core" +) + +// Score calculates the score of the constraint expression. A greater score is +// better. A negative score means the constraint is not satisfied. +func (c Constraint) Score(region *core.RegionInfo, cluster Cluster) int { + v0, v := c.Value, c.eval(region, cluster) + switch c.Op { + case "=": + return -int(math.Abs(float64(v - v0))) + case "<=": + return v0 - v + case ">=": + return v - v0 + case "<": + return v0 - v - 1 + case ">": + return v - v0 - 1 + } + return -1 +} + +func (c Constraint) eval(region *core.RegionInfo, cluster Cluster) int { + switch c.Function { + case "count": + return c.evalCount(region, cluster) + case "label_values": + return c.evalLabelValues(region, cluster) + case "count_leader": + return c.evalCountLeader(region, cluster) + case "isolation_level": + return c.evalIsolationLevel(region, cluster) + } + return 0 +} + +func (c Constraint) evalCount(region *core.RegionInfo, cluster Cluster) int { + stores := c.filterStores(cluster.GetRegionStores(region.GetID())) + return len(stores) +} + +func (c Constraint) evalLabelValues(region *core.RegionInfo, cluster Cluster) int { + stores := c.filterStores(cluster.GetRegionStores(region.GetID())) + return c.countLabelValues(stores, c.Labels) +} + +func (c Constraint) evalCountLeader(region *core.RegionInfo, cluster Cluster) int { + leaderStore := cluster.GetStore(region.GetLeader().GetStoreId()) + if leaderStore != nil && c.matchStore(leaderStore) { + return 1 + } + return 0 +} + +func (c Constraint) evalIsolationLevel(region *core.RegionInfo, cluster Cluster) int { + stores := c.filterStores(cluster.GetRegionStores(region.GetID())) + for i := range c.Labels { + if c.countLabelValues(stores, c.Labels[:i+1]) == len(stores) { + return len(c.Labels) - i + } + } + return 0 +} + +func (c Constraint) filterStores(stores []*core.StoreInfo) []*core.StoreInfo { + res := stores[:0] + for _, s := range stores { + if c.matchStore(s) { + res = append(res, s) + } + } + return res +} + +func (c Constraint) matchStore(store *core.StoreInfo) bool { + for _, f := range c.Filters { + if store.GetLabelValue(f.Key) != f.Value { + return false + } + } + return true +} + +func (c Constraint) labelValues(store *core.StoreInfo, labels []string) string { + var str string + for _, label := range labels { + str = str + store.GetLabelValue(label) + "/" // Symbol '/' is never included in a label value. + } + return str +} + +func (c Constraint) countLabelValues(stores []*core.StoreInfo, labels []string) int { + if len(labels) == 0 { + return 0 + } + set := make(map[string]struct{}) + for _, s := range stores { + set[c.labelValues(s, labels)] = struct{}{} + } + return len(set) +} diff --git a/server/placement/placement_test.go b/server/placement/placement_test.go index d5579a075b2..a56f0c3f002 100644 --- a/server/placement/placement_test.go +++ b/server/placement/placement_test.go @@ -16,7 +16,10 @@ package placement import ( "testing" + "github.com/pingcap/kvproto/pkg/metapb" + . 
"github.com/pingcap/check" + "github.com/pingcap/pd/server/core" ) func TestPlacement(t *testing.T) { @@ -93,3 +96,160 @@ func (s *testPlacementSuite) constraint(function string, op string, value int, a func (s *testPlacementSuite) config(constraints ...*Constraint) *Config { return &Config{Constraints: constraints} } + +func (s *testPlacementSuite) TestFunctions(c *C) { + cluster := newMockCluster() + cluster.PutStore(1, "zone", "z1", "host", "h1", "disk", "ssd") + cluster.PutStore(2, "zone", "z1", "host", "h1", "disk", "ssd") + cluster.PutStore(3, "zone", "z1", "host", "h2", "disk", "hdd") + cluster.PutStore(4, "zone", "z2", "host", "h1", "disk", "ssd") + + cases := []struct { + config string + regionStores []uint64 + }{ + {"count()=3", []uint64{1, 2, 3}}, + {"count()=1", []uint64{3}}, + {"count(zone:z1)=3", []uint64{1, 2, 3, 4}}, + {"count(zone:z2,host:h2)=0", []uint64{1, 2, 3, 4}}, + {"count(disk:ssd,zone,host)=3", []uint64{1, 2, 3, 4}}, + {"count(foo:bar)=0", []uint64{1, 2, 3, 4}}, + {"label_values()=0", []uint64{1, 2, 3, 4}}, + {"label_values(disk)=2", []uint64{1, 2, 3, 4}}, + {"label_values(disk)=1", []uint64{1, 2}}, + {"label_values(foo)=1", []uint64{1, 2, 3, 4}}, // All replicas have empty value for key 'foo'. + {"label_values(zone)=2", []uint64{1, 2, 3, 4}}, + {"label_values(zone,host)=3", []uint64{1, 2, 3, 4}}, + {"label_values(zone:z2,host)=1", []uint64{1, 2, 3, 4}}, + {"label_values(zone:z1,host)=2", []uint64{1, 2, 3, 4}}, + {"label_values(zone:z1,host,disk)=2", []uint64{1, 2, 3, 4}}, + {"count_leader()=1", []uint64{1, 2, 3, 4}}, + {"count_leader(zone:z2)=0", []uint64{1, 2, 3, 4}}, + {"count_leader(zone:z1,disk:ssd)=1", []uint64{1, 2, 3, 4}}, + {"count_leader(zone:z1,disk:ssd)=0", []uint64{3, 1, 2, 4}}, + {"isolation_level(zone)=0", []uint64{1, 2}}, + {"isolation_level(zone)=1", []uint64{1, 4}}, + {"isolation_level(zone,host)=2", []uint64{1, 4}}, + {"isolation_level()=0", []uint64{1, 4}}, + {"isolation_level(zone,host)=1", []uint64{2, 3, 4}}, + {"isolation_level(zone,host,disk)=2", []uint64{2, 3, 4}}, + {"isolation_level(zone,host,disk)=0", []uint64{1, 2}}, + {"isolation_level(host)=0", []uint64{2, 4}}, + {"isolation_level(host,zone)=1", []uint64{2, 4}}, + } + + for _, t := range cases { + constraint, err := parseConstraint(t.config) + c.Assert(err, IsNil) + cluster.PutRegion(1, t.regionStores...) 
+ c.Assert(constraint.Score(cluster.GetRegion(1), cluster), Equals, 0) + } +} + +func (s *testPlacementSuite) TestScore(c *C) { + cluster := newMockCluster() + cluster.PutStore(1) + cluster.PutStore(2) + cluster.PutStore(3) + cluster.PutRegion(1, 1, 2, 3) + + cases := []struct { + config string + score int + }{ + {"count()=1", -2}, + {"count()=2", -1}, + {"count()=3", 0}, + {"count()=4", -1}, + {"count()=5", -2}, + + {"count()<=1", -2}, + {"count()<=2", -1}, + {"count()<=3", 0}, + {"count()<=4", 1}, + {"count()<=5", 2}, + + {"count()<1", -3}, + {"count()<2", -2}, + {"count()<3", -1}, + {"count()<4", 0}, + {"count()<5", 1}, + + {"count()>=1", 2}, + {"count()>=2", 1}, + {"count()>=3", 0}, + {"count()>=4", -1}, + {"count()>=5", -2}, + + {"count()>1", 1}, + {"count()>2", 0}, + {"count()>3", -1}, + {"count()>4", -2}, + {"count()>5", -3}, + } + + for _, t := range cases { + constraint, err := parseConstraint(t.config) + c.Assert(err, IsNil) + c.Assert(constraint.Score(cluster.GetRegion(1), cluster), Equals, t.score) + } +} + +type mockCluster struct { + regions map[uint64]*core.RegionInfo + stores map[uint64]*core.StoreInfo +} + +func newMockCluster() *mockCluster { + return &mockCluster{ + regions: make(map[uint64]*core.RegionInfo), + stores: make(map[uint64]*core.StoreInfo), + } +} + +func (c *mockCluster) GetRegion(id uint64) *core.RegionInfo { + return c.regions[id] +} + +func (c *mockCluster) GetStores() []*core.StoreInfo { + stores := make([]*core.StoreInfo, 0, len(c.stores)) + for _, s := range c.stores { + stores = append(stores, s) + } + return stores +} + +func (c *mockCluster) GetStore(id uint64) *core.StoreInfo { + return c.stores[id] +} + +func (c *mockCluster) GetRegionStores(id uint64) []*core.StoreInfo { + region := c.GetRegion(id) + if region == nil { + return nil + } + stores := make([]*core.StoreInfo, 0, len(region.GetPeers())) + for _, p := range region.GetPeers() { + store := c.GetStore(p.GetStoreId()) + if store != nil { + stores = append(stores, store) + } + } + return stores +} + +func (c *mockCluster) PutRegion(id uint64, stores ...uint64) { + meta := &metapb.Region{Id: id} + for _, s := range stores { + meta.Peers = append(meta.Peers, &metapb.Peer{StoreId: s}) + } + c.regions[id] = core.NewRegionInfo(meta, &metapb.Peer{StoreId: stores[0]}) +} + +func (c *mockCluster) PutStore(id uint64, labelPairs ...string) { + var labels []*metapb.StoreLabel + for i := 0; i < len(labelPairs); i += 2 { + labels = append(labels, &metapb.StoreLabel{Key: labelPairs[i], Value: labelPairs[i+1]}) + } + c.stores[id] = core.NewStoreInfo(&metapb.Store{Id: id, Labels: labels}) +} diff --git a/server/region_syncer.go b/server/region_syncer.go new file mode 100644 index 00000000000..23c59e2657b --- /dev/null +++ b/server/region_syncer.go @@ -0,0 +1,180 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
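To make the scoring rules in placement/functions.go concrete, here is a test-style sketch in the spirit of TestFunctions above; it reuses newMockCluster, PutStore, PutRegion and parseConstraint exactly as placement_test.go does, while ExampleIsolationLevelScore is only an illustrative name (a plain "fmt" import is assumed):

func ExampleIsolationLevelScore() {
	cluster := newMockCluster()
	cluster.PutStore(1, "zone", "z1", "host", "h1")
	cluster.PutStore(4, "zone", "z2", "host", "h1")
	cluster.PutRegion(1, 1, 4)

	constraint, err := parseConstraint("isolation_level(zone,host)=2")
	if err != nil {
		panic(err)
	}
	// The two replicas already differ on the first label ("zone"), so
	// evalIsolationLevel returns len(labels)-0 = 2, and the "=" operator
	// scores -|2-2| = 0: the constraint is satisfied exactly.
	fmt.Println(constraint.Score(cluster.GetRegion(1), cluster))
	// Output: 0
}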
+ +package server + +import ( + "context" + "net/url" + "sync" + "time" + + "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + msgSize = 8 * 1024 * 1024 + maxSyncRegionBatchSize = 100 + syncerKeepAliveInterval = 10 * time.Second +) + +type syncerClient interface { + Recv() (*pdpb.SyncRegionResponse, error) + CloseSend() error +} +type syncerServer interface { + Send(regions *pdpb.SyncRegionResponse) error +} + +type regionSyncer struct { + sync.RWMutex + streams map[string]syncerServer + ctx context.Context + cancel context.CancelFunc + server *Server + closed chan struct{} + wg sync.WaitGroup +} + +func newRegionSyncer(server *Server) *regionSyncer { + return ®ionSyncer{ + streams: make(map[string]syncerServer), + server: server, + closed: make(chan struct{}), + } +} + +func (s *regionSyncer) bindStream(name string, stream syncerServer) { + s.Lock() + defer s.Unlock() + s.streams[name] = stream +} + +func (s *regionSyncer) broadcast(regions *pdpb.SyncRegionResponse) { + failed := make([]string, 0, 3) + s.RLock() + for name, sender := range s.streams { + err := sender.Send(regions) + if err != nil { + log.Error("region syncer send data meet error:", err) + failed = append(failed, name) + } + } + s.RUnlock() + if len(failed) > 0 { + s.Lock() + for _, name := range failed { + delete(s.streams, name) + log.Infof("region syncer delete the stream of %s", name) + } + s.Unlock() + } +} + +func (s *regionSyncer) stopSyncWithLeader() { + s.reset() + s.Lock() + close(s.closed) + s.closed = make(chan struct{}) + s.Unlock() + s.wg.Wait() +} + +func (s *regionSyncer) reset() { + s.Lock() + defer s.Unlock() + + if s.cancel == nil { + return + } + s.cancel() + s.cancel, s.ctx = nil, nil +} + +func (s *regionSyncer) establish(addr string) (syncerClient, error) { + s.reset() + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + + cc, err := grpc.Dial(u.Host, grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(msgSize))) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithCancel(s.server.serverLoopCtx) + client, err := pdpb.NewPDClient(cc).SyncRegions(ctx) + if err != nil { + cancel() + return nil, err + } + err = client.Send(&pdpb.SyncRegionRequest{ + Header: &pdpb.RequestHeader{ClusterId: s.server.clusterID}, + Member: s.server.member, + }) + if err != nil { + cancel() + return nil, err + } + s.Lock() + s.ctx, s.cancel = ctx, cancel + s.Unlock() + return client, nil +} + +func (s *regionSyncer) startSyncWithLeader(addr string) { + s.wg.Add(1) + s.RLock() + closed := s.closed + s.RUnlock() + go func() { + defer s.wg.Done() + for { + select { + case <-closed: + return + default: + } + // establish client + client, err := s.establish(addr) + if err != nil { + if ev, ok := status.FromError(err); ok { + if ev.Code() == codes.Canceled { + return + } + } + log.Errorf("%s failed to establish sync stream with leader %s: %s", s.server.member.GetName(), s.server.GetLeader().GetName(), err) + time.Sleep(time.Second) + continue + } + log.Infof("%s start sync with leader %s", s.server.member.GetName(), s.server.GetLeader().GetName()) + for { + resp, err := client.Recv() + if err != nil { + log.Error("region sync with leader meet error:", err) + client.CloseSend() + break + } + for _, r := range resp.GetRegions() { + s.server.kv.SaveRegion(r) + } + } + } + }() +} diff --git a/server/server.go b/server/server.go index 
eec25e3df56..0ce44c4abb7 100644 --- a/server/server.go +++ b/server/server.go @@ -19,6 +19,7 @@ import ( "math/rand" "net/http" "path" + "path/filepath" "strconv" "strings" "sync" @@ -208,7 +209,12 @@ func (s *Server) startServer() error { s.idAlloc = &idAllocator{s: s} kvBase := newEtcdKVBase(s) - s.kv = core.NewKV(kvBase) + path := filepath.Join(s.cfg.DataDir, "region-meta") + regionKV, err := core.NewRegionKV(path) + if err != nil { + return err + } + s.kv = core.NewKV(kvBase).SetRegionKV(regionKV) s.cluster = newRaftCluster(s, s.clusterID) s.hbStreams = newHeartbeatStreams(s.clusterID) if s.classifier, err = namespace.CreateClassifier(s.cfg.NamespaceClassifier, s.kv, s.idAlloc); err != nil { @@ -258,6 +264,9 @@ func (s *Server) Close() { if s.hbStreams != nil { s.hbStreams.Close() } + if err := s.kv.Close(); err != nil { + log.Errorf("close kv meet error: %s", err) + } log.Info("close server") } @@ -388,7 +397,14 @@ func (s *Server) bootstrapCluster(req *pdpb.BootstrapRequest) (*pdpb.BootstrapRe } log.Infof("bootstrap cluster %d ok", clusterID) - + err = s.kv.SaveRegion(req.GetRegion()) + if err != nil { + log.Warnf("save the bootstrap region failed: %s", err) + } + err = s.kv.Flush() + if err != nil { + log.Warnf("flush the bootstrap region failed: %s", err) + } if err := s.cluster.start(); err != nil { return nil, err } diff --git a/server/store_statistics.go b/server/store_statistics.go index 08f46c254cb..f3169d0072d 100644 --- a/server/store_statistics.go +++ b/server/store_statistics.go @@ -45,6 +45,7 @@ func newStoreStatistics(opt *scheduleOption, namespace string) *storeStatistics } func (s *storeStatistics) Observe(store *core.StoreInfo) { + id := strconv.FormatUint(store.GetId(), 10) // Store state. switch store.GetState() { case metapb.StoreState_Up: @@ -61,6 +62,7 @@ func (s *storeStatistics) Observe(store *core.StoreInfo) { s.Offline++ case metapb.StoreState_Tombstone: s.Tombstone++ + s.resetStoreStatistics(id) return } if store.IsLowSpace(s.opt.GetLowSpaceRatio()) { @@ -73,7 +75,6 @@ func (s *storeStatistics) Observe(store *core.StoreInfo) { s.RegionCount += store.RegionCount s.LeaderCount += store.LeaderCount - id := strconv.FormatUint(store.GetId(), 10) storeStatusGauge.WithLabelValues(s.namespace, id, "region_score").Set(store.RegionScore(s.opt.GetHighSpaceRatio(), s.opt.GetLowSpaceRatio(), 0)) storeStatusGauge.WithLabelValues(s.namespace, id, "leader_score").Set(store.LeaderScore(0)) storeStatusGauge.WithLabelValues(s.namespace, id, "region_size").Set(float64(store.RegionSize)) @@ -104,6 +105,18 @@ func (s *storeStatistics) Collect() { } } +func (s *storeStatistics) resetStoreStatistics(id string) { + storeStatusGauge.WithLabelValues(s.namespace, id, "region_score").Set(0) + storeStatusGauge.WithLabelValues(s.namespace, id, "leader_score").Set(0) + storeStatusGauge.WithLabelValues(s.namespace, id, "region_size").Set(0) + storeStatusGauge.WithLabelValues(s.namespace, id, "region_count").Set(0) + storeStatusGauge.WithLabelValues(s.namespace, id, "leader_size").Set(0) + storeStatusGauge.WithLabelValues(s.namespace, id, "leader_count").Set(0) + storeStatusGauge.WithLabelValues(s.namespace, id, "store_available").Set(0) + storeStatusGauge.WithLabelValues(s.namespace, id, "store_used").Set(0) + storeStatusGauge.WithLabelValues(s.namespace, id, "store_capacity").Set(0) +} + type storeStatisticsMap struct { opt *scheduleOption classifier namespace.Classifier diff --git a/tools/pd-ctl/pdctl/command/log_command.go b/tools/pd-ctl/pdctl/command/log_command.go index 
a84cd58bb36..f456624c079 100644 --- a/tools/pd-ctl/pdctl/command/log_command.go +++ b/tools/pd-ctl/pdctl/command/log_command.go @@ -23,7 +23,7 @@ import ( ) var ( - logPrefix = "pd/api/v1/log" + logPrefix = "pd/api/v1/admin/log" ) // NewLogCommand New a log subcommand of the rootCmd diff --git a/tools/pd-ctl/pdctl/command/region_command.go b/tools/pd-ctl/pdctl/command/region_command.go index 6af72af8e38..12e2ddd6774 100644 --- a/tools/pd-ctl/pdctl/command/region_command.go +++ b/tools/pd-ctl/pdctl/command/region_command.go @@ -49,6 +49,7 @@ func NewRegionCommand() *cobra.Command { r.AddCommand(NewRegionWithCheckCommand()) r.AddCommand(NewRegionWithSiblingCommand()) r.AddCommand(NewRegionWithStoreCommand()) + r.AddCommand(NewRegionsWithStartKeyCommand()) topRead := &cobra.Command{ Use: "topread ", @@ -265,6 +266,60 @@ func decodeProtobufText(text string) (string, error) { return string(buf), nil } +// NewRegionsWithStartKeyCommand returns regions from startkey subcommand of regionCmd. +func NewRegionsWithStartKeyCommand() *cobra.Command { + r := &cobra.Command{ + Use: "startkey [--format=raw|pb|proto|protobuf] ", + Short: "show regions from start key", + Run: showRegionsFromStartKeyCommandFunc, + } + + r.Flags().String("format", "raw", "the key format") + return r +} + +func showRegionsFromStartKeyCommandFunc(cmd *cobra.Command, args []string) { + if len(args) < 1 || len(args) > 2 { + fmt.Println(cmd.UsageString()) + return + } + + var ( + key string + err error + ) + + format := cmd.Flags().Lookup("format").Value.String() + switch format { + case "raw": + key = args[0] + case "pb", "proto", "protobuf": + key, err = decodeProtobufText(args[0]) + if err != nil { + fmt.Println("Error: ", err) + return + } + default: + fmt.Println("Error: unknown format") + return + } + // TODO: Deal with path escaped + prefix := regionKeyPrefix + "/" + key + if len(args) == 2 { + if _, err = strconv.Atoi(args[1]); err != nil { + fmt.Println("limit should be a number") + return + } + prefix += "?limit=" + args[1] + } + r, err := doRequest(cmd, prefix, http.MethodGet) + if err != nil { + fmt.Printf("Failed to get region: %s\n", err) + return + } + fmt.Println(r) +} + // NewRegionWithCheckCommand returns a region with check subcommand of regionCmd func NewRegionWithCheckCommand() *cobra.Command { r := &cobra.Command{ diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING deleted file mode 100644 index 01b5743200b..00000000000 --- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 TOML authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING deleted file mode 100644 index 01b5743200b..00000000000 --- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 TOML authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING deleted file mode 100644 index 01b5743200b..00000000000 --- a/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 TOML authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/etcd-io/gofail/gofail.go b/vendor/github.com/etcd-io/gofail/gofail.go new file mode 100644 index 00000000000..a1798fc8377 --- /dev/null +++ b/vendor/github.com/etcd-io/gofail/gofail.go @@ -0,0 +1,195 @@ +// Copyright 2016 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
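Usage note for the new startkey subcommand added in tools/pd-ctl/pdctl/command/region_command.go above: something like `region startkey --format=raw abc 16` in the pd-ctl shell would list up to 16 regions starting from the raw key "abc"; per showRegionsFromStartKeyCommandFunc it issues a GET against regionKeyPrefix + "/abc?limit=16" (regionKeyPrefix is defined elsewhere in that file). The concrete command line here is only an illustration of the argument handling, not output from a real run.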
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// gofail is a tool for enabling/disabling failpoints in go code. +package main + +import ( + "fmt" + "go/build" + "io" + "os" + "path" + "path/filepath" + "strings" + + "github.com/etcd-io/gofail/code" +) + +type xfrmFunc func(io.Writer, io.Reader) ([]*code.Failpoint, error) + +func xfrmFile(xfrm xfrmFunc, path string) ([]*code.Failpoint, error) { + src, serr := os.Open(path) + if serr != nil { + return nil, serr + } + defer src.Close() + + dst, derr := os.OpenFile(path+".tmp", os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + if derr != nil { + return nil, derr + } + defer dst.Close() + + fps, xerr := xfrm(dst, src) + if xerr != nil || len(fps) == 0 { + os.Remove(dst.Name()) + return nil, xerr + } + + rerr := os.Rename(dst.Name(), path) + if rerr != nil { + os.Remove(dst.Name()) + return nil, rerr + } + + return fps, nil +} + +func dir2files(dir, ext string) (ret []string, err error) { + if dir, err = filepath.Abs(dir); err != nil { + return nil, err + } + + f, ferr := os.Open(dir) + if ferr != nil { + return nil, ferr + } + defer f.Close() + + names, rerr := f.Readdirnames(0) + if rerr != nil { + return nil, rerr + } + for _, f := range names { + if path.Ext(f) != ext { + continue + } + ret = append(ret, path.Join(dir, f)) + } + return ret, nil +} + +func paths2files(paths []string) (files []string) { + // no paths => use cwd + if len(paths) == 0 { + wd, gerr := os.Getwd() + if gerr != nil { + fmt.Println(gerr) + os.Exit(1) + } + return paths2files([]string{wd}) + } + for _, p := range paths { + s, serr := os.Stat(p) + if serr != nil { + fmt.Println(serr) + os.Exit(1) + } + if s.IsDir() { + fs, err := dir2files(p, ".go") + if err != nil { + fmt.Println(err) + os.Exit(1) + } + files = append(files, fs...) 
+ } else if path.Ext(s.Name()) == ".go" { + abs, err := filepath.Abs(p) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + files = append(files, abs) + } + } + return files +} + +func writeBinding(file string, fps []*code.Failpoint) { + if len(fps) == 0 { + return + } + fname := strings.Split(path.Base(file), ".go")[0] + ".fail.go" + out, err := os.Create(path.Join(path.Dir(file), fname)) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + // XXX: support "package main" + pkgAbsDir := path.Dir(file) + pkg := path.Base(pkgAbsDir) + pkgDir := "" + for _, srcdir := range build.Default.SrcDirs() { + if strings.HasPrefix(pkgAbsDir, srcdir) { + pkgDir = strings.Replace(pkgAbsDir, srcdir, "", 1) + break + } + } + fppath := pkg + if pkgDir == "" { + fmt.Fprintf( + os.Stderr, + "missing package for %q; using %q as failpoint path\n", + pkgAbsDir, + pkg) + } else { + fppath = pkgDir[1:] + } + code.NewBinding(pkg, fppath, fps).Write(out) + out.Close() +} + +func main() { + if len(os.Args) < 2 { + fmt.Println("not enough arguments") + os.Exit(1) + } + + var xfrm xfrmFunc + enable := false + switch os.Args[1] { + case "enable": + xfrm = code.ToFailpoints + enable = true + case "disable": + xfrm = code.ToComments + default: + fmt.Println("expected enable or disable") + os.Exit(1) + } + + files := paths2files(os.Args[2:]) + fps := [][]*code.Failpoint{} + for _, path := range files { + curfps, err := xfrmFile(xfrm, path) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + fps = append(fps, curfps) + } + + if enable { + // build runtime bindings .fail.go + for i := range files { + writeBinding(files[i], fps[i]) + } + } else { + // remove all runtime bindings + for i := range files { + fname := strings.Split(path.Base(files[i]), ".go")[0] + ".fail.go" + os.Remove(path.Join(path.Dir(files[i]), fname)) + } + } +} diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 00000000000..6050c10f4c8 --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 00000000000..72efb0353dd --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 00000000000..fcd192b849e --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 00000000000..e6179f65e35 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). 
+ CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. 
We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. 
+ // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 00000000000..8c9f2049bc7 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 00000000000..8d393e904bb --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. 
On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. 
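A quick sanity check of the MaxEncodedLen bound above: for this package's 64 KiB maxBlockSize (65536 bytes), 32 + 65536 + 65536/6 = 76490, i.e. a worst-case growth of roughly 17%. A minimal block-level round-trip with the Encode/Decode API from this vendored package, purely for illustration:

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("pd-region-meta "), 1000)

	// Encode may write into dst if it is large enough; nil asks it to allocate.
	encoded := snappy.Encode(nil, src)
	fmt.Printf("%d -> %d bytes (bound: %d)\n", len(src), len(encoded), snappy.MaxEncodedLen(len(src)))

	decoded, err := snappy.Decode(nil, encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, decoded)) // true
}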
+func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. 
+ w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 00000000000..150d91bc8be --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 00000000000..adfd979fe27 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. 
The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. 
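// Editor's note (illustrative sketch, not part of the vendored assembly): these
// helpers back the exported block-format Encode and the framing-format Writer
// added earlier in this diff. Typical use of the package, per its documentation,
// is roughly:
//
//	var buf bytes.Buffer
//	data := []byte("hello, snappy")
//	compressed := snappy.Encode(nil, data) // block format, one-shot
//	w := snappy.NewBufferedWriter(&buf)    // framing format, streaming
//	_, werr := w.Write(data)
//	cerr := w.Close()                      // Close flushes any buffered data
//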
+TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. + // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. 
This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 00000000000..dbcae905e6e --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
+// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. 
However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
+ x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 00000000000..ece692ea461 --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. 
It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/LICENSE deleted file mode 100644 index 261eeb9e9f8..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/pkg/error_code/codes.go b/vendor/github.com/pingcap/errcode/codes.go similarity index 87% rename from pkg/error_code/codes.go rename to vendor/github.com/pingcap/errcode/codes.go index a9ce721365c..a5bbcf9c64c 100644 --- a/pkg/error_code/codes.go +++ b/vendor/github.com/pingcap/errcode/codes.go @@ -39,14 +39,16 @@ func NewInvalidInputErr(err error) ErrorCode { var _ ErrorCode = (*invalidInputErr)(nil) // assert implements interface var _ HasClientData = (*invalidInputErr)(nil) // assert implements interface +var _ Causer = (*invalidInputErr)(nil) // assert implements interface -// internalError gives the code InvalidInputCode -type internalErr struct{ CodedError } +// internalError gives the code InternalCode +type internalErr struct{ StackCode } // NewInternalErr creates an internalError from an err // If the given err is an ErrorCode that is a descendant of InternalCode, // its code will be used. // This ensures the intention of sending an HTTP 50x. 
+// This function also records a stack trace. func NewInternalErr(err error) ErrorCode { code := InternalCode if errcode, ok := err.(ErrorCode); ok { @@ -55,11 +57,12 @@ func NewInternalErr(err error) ErrorCode { code = errCode } } - return internalErr{CodedError{GetCode: code, Err: err}} + return internalErr{NewStackCode(CodedError{GetCode: code, Err: err}, 2)} } var _ ErrorCode = (*internalErr)(nil) // assert implements interface var _ HasClientData = (*internalErr)(nil) // assert implements interface +var _ Causer = (*internalErr)(nil) // assert implements interface // notFound gives the code NotFoundCode type notFoundErr struct{ CodedError } @@ -73,6 +76,7 @@ func NewNotFoundErr(err error) ErrorCode { var _ ErrorCode = (*notFoundErr)(nil) // assert implements interface var _ HasClientData = (*notFoundErr)(nil) // assert implements interface +var _ Causer = (*notFoundErr)(nil) // assert implements interface // CodedError is a convenience to attach a code to an error and already satisfy the ErrorCode interface. // If the error is a struct, that struct will get preseneted as data to the client. @@ -101,11 +105,17 @@ func NewCodedError(err error, code Code) CodedError { var _ ErrorCode = (*CodedError)(nil) // assert implements interface var _ HasClientData = (*CodedError)(nil) // assert implements interface +var _ Causer = (*CodedError)(nil) // assert implements interface func (e CodedError) Error() string { return e.Err.Error() } +// Cause satisfies the Causer interface. +func (e CodedError) Cause() error { + return e.Err +} + // Code returns the GetCode field func (e CodedError) Code() Code { return e.GetCode diff --git a/pkg/error_code/error_code.go b/vendor/github.com/pingcap/errcode/error_code.go similarity index 71% rename from pkg/error_code/error_code.go rename to vendor/github.com/pingcap/errcode/error_code.go index 76df51d73f7..86056e7b7ab 100644 --- a/pkg/error_code/error_code.go +++ b/vendor/github.com/pingcap/errcode/error_code.go @@ -13,47 +13,57 @@ // Package errcode facilitates standardized API error codes. // The goal is that clients can reliably understand errors by checking against immutable error codes -// A Code should never be modified once committed (and released for use by clients). -// Instead a new Code should be created. +// +// This godoc documents usage. For broader context, see https://github.com/pingcap/errcode/tree/master/README.md // // Error codes are represented as strings by CodeStr (see CodeStr documentation). // // This package is designed to have few opinions and be a starting point for how you want to do errors in your project. // The main requirement is to satisfy the ErrorCode interface by attaching a Code to an Error. // See the documentation of ErrorCode. -// Additional optional interfaces HasClientData and HasOperation are provided for extensibility +// Additional optional interfaces HasClientData, HasOperation, Causer, and StackTracer are provided for extensibility // in creating structured error data representations. // // Hierarchies are supported: a Code can point to a parent. // This is used in the HTTPCode implementation to inherit HTTP codes found with MetaDataFromAncestors. // The hierarchy is present in the Code's string representation with a dot separation. // -// A few generic top-level error codes are provided here. -// You are encouraged to create your own application customized error codes rather than just using generic errors. +// A few generic top-level error codes are provided (see the variables section of the doc). 
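// Editor's note (illustrative sketch, not part of the vendored file): the generic
// constructors from codes.go might be used roughly like this, where loadRegion and
// saveRegion are hypothetical callers:
//
//	region, err := loadRegion(id)
//	if err != nil {
//		return errcode.NewNotFoundErr(err) // NotFoundCode
//	}
//	if err := saveRegion(region); err != nil {
//		return errcode.NewInternalErr(err) // InternalCode: HTTP 50x, records a stack trace
//	}
//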
+// You are encouraged to create your own error codes customized to your application rather than solely using generic errors. // -// See JSONFormat for an opinion on how to send back meta data about errors with the error data to a client. +// See NewJSONFormat for an opinion on how to send back meta data about errors with the error data to a client. // JSONFormat includes a body of response data (the "data field") that is by default the data from the Error // serialized to JSON. -// This package provides no help on versioning error data. +// +// Stack traces are automatically added by NewInternalErr and show up as the Stack field in JSONFormat. +// Errors can be grouped with Combine() and ungrouped via Errors() which show up as the Others field in JSONFormat. +// +// To extract any ErrorCodes from an error, use CodeChain(). +// This extracts error codes without information loss (using ChainContext). package errcode import ( "fmt" "net/http" "strings" + + "github.com/pingcap/errors" ) -// CodeStr is a representation of the type of a particular error. +// CodeStr is the name of the error code. +// It is a representation of the type of a particular error. // The underlying type is string rather than int. // This enhances both extensibility (avoids merge conflicts) and user-friendliness. // A CodeStr can have dot separators indicating a hierarchy. +// +// Generally a CodeStr should never be modified once used by clients. +// Instead a new CodeStr should be created. type CodeStr string func (str CodeStr) String() string { return string(str) } // A Code has a CodeStr representation. // It is attached to a Parent to find metadata from it. -// The Meta field is provided for extensibility: e.g. attaching HTTP codes. type Code struct { // codeStr does not include parent paths // The full code (with parent paths) is accessed with CodeStr @@ -111,9 +121,9 @@ func (code Code) IsAncestor(ancestorCode Code) bool { return nil != code.findAncestor(func(an Code) bool { return an == ancestorCode }) } -// MetaData is a pattern for attaching meta data to codes and inheriting it from a parent. +// MetaData is used in a pattern for attaching meta data to codes and inheriting it from a parent. // See MetaDataFromAncestors. -// This is used to attach an HTTP code to a Code. +// This is used to attach an HTTP code to a Code as meta data. type MetaData map[CodeStr]interface{} // MetaDataFromAncestors looks for meta data starting at the current code. @@ -163,7 +173,7 @@ func (code Code) HTTPCode() int { // For an application specific error with a 1:1 mapping between a go error structure and a RegisteredCode, // You probably want to use this interface directly. Example: // -// // First define a normal error type +// // First define a normal error type // type PathBlocked struct { // start uint64 `json:"start"` // end uint64 `json:"end"` @@ -177,7 +187,7 @@ func (code Code) HTTPCode() int { // // Now define the code // var PathBlockedCode = errcode.StateCode.Child("state.blocked") // -// // Now attach the code to the error type +// // Now attach the code to the error type // func (e PathBlocked) Code() Code { // return PathBlockedCode // } @@ -186,6 +196,16 @@ type ErrorCode interface { Code() Code } +// Causer allows the abstract retrieval of the underlying error. +// This is the interface that pkg/errors does not export but is considered part of the stable public API. +// TODO: export this from pkg/errors +// +// Types that wrap errors should implement this to allow viewing of the underlying error. 
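// Editor's note (illustrative sketch, not part of the vendored file): a minimal
// wrapper satisfying Causer could look like this, with annotatedErr being a
// hypothetical type:
//
//	type annotatedErr struct {
//		msg string
//		err error
//	}
//
//	func (a annotatedErr) Error() string { return a.msg + ": " + a.err.Error() }
//	func (a annotatedErr) Cause() error  { return a.err }
//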
+// Generally you would use this via pkg/errors.Cause or pkg/errors.Unwrap. +type Causer interface { + Cause() error +} + // HasClientData is used to defined how to retrieve the data portion of an ErrorCode to be returned to the client. // Otherwise the struct itself will be assumed to be all the data by the ClientData method. // This is provided for exensibility, but may be unnecessary for you. @@ -206,13 +226,22 @@ func ClientData(errCode ErrorCode) interface{} { } // JSONFormat is an opinion on how to serialize an ErrorCode to JSON. -// Msg is the string from Error(). -// The Data field is filled in by GetClientData +// * Code is the error code string (CodeStr) +// * Msg is the string from Error() and should be friendly to end users. +// * Data is the ad-hoc data filled in by GetClientData and should be consumable by clients. +// * Operation is the high-level operation that was happening at the time of the error. +// The Operation field may be missing, and the Data field may be empty. +// +// The rest of the fields may be populated sparsely depending on the application: +// * Stack is a stack trace. This is only given for internal errors. +// * Others gives other errors that occurred (perhaps due to parallel requests). type JSONFormat struct { - Data interface{} `json:"data"` - Msg string `json:"msg"` - Code CodeStr `json:"code"` - Operation string `json:"operation,omitempty"` + Code CodeStr `json:"code"` + Msg string `json:"msg"` + Data interface{} `json:"data"` + Operation string `json:"operation,omitempty"` + Stack errors.StackTrace `json:"stack,omitempty"` + Others []JSONFormat `json:"others,omitempty"` } // OperationClientData gives the results of both the ClientData and Operation functions. @@ -228,14 +257,31 @@ func OperationClientData(errCode ErrorCode) (string, interface{}) { return op, data } -// NewJSONFormat turns an ErrorCode into a JSONFormat +// NewJSONFormat turns an ErrorCode into a JSONFormat. +// If you use ErrorCodeChain first, you will ensure proper population of the Others field. func NewJSONFormat(errCode ErrorCode) JSONFormat { + // Gather up multiple errors. + // We discard any that are not ErrorCode. + errorCodes := ErrorCodes(errCode) + others := make([]JSONFormat, len(errorCodes)-1) + for i, err := range errorCodes[1:] { + others[i] = NewJSONFormat(err) + } + op, data := OperationClientData(errCode) + + var stack errors.StackTrace + if errCode.Code().IsAncestor(InternalCode) { + stack = StackTrace(errCode) + } + return JSONFormat{ Data: data, Msg: errCode.Error(), Code: errCode.Code().CodeStr(), Operation: op, + Stack: stack, + Others: others, } } diff --git a/vendor/github.com/pingcap/errcode/group.go b/vendor/github.com/pingcap/errcode/group.go new file mode 100644 index 00000000000..45b4c4dca80 --- /dev/null +++ b/vendor/github.com/pingcap/errcode/group.go @@ -0,0 +1,234 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package errcode + +import ( + "fmt" + + "github.com/pingcap/errors" +) + +// ErrorCodes return all errors (from an ErrorGroup) that are of interface ErrorCode. 
+// It first calls the Errors function.
+func ErrorCodes(err error) []ErrorCode {
+	errors := errors.Errors(err)
+	errorCodes := make([]ErrorCode, len(errors))
+	for i, errItem := range errors {
+		if errcode, ok := errItem.(ErrorCode); ok {
+			errorCodes[i] = errcode
+		}
+	}
+	return errorCodes
+}
+
+// A MultiErrCode contains at least one ErrorCode and uses that to satisfy the ErrorCode and related interfaces.
+// The Error method will produce a string of all the errors with a semi-colon separation.
+// Later code (such as a JSON response) needs to look for the ErrorGroup interface.
+type MultiErrCode struct {
+	ErrCode ErrorCode
+	rest    []error
+}
+
+// Combine constructs a MultiErrCode.
+// It will combine any other MultiErrCode into just one MultiErrCode.
+// This is "horizontal" composition.
+// If you want normal "vertical" composition use BuildChain.
+func Combine(initial ErrorCode, others ...ErrorCode) MultiErrCode {
+	var rest []error
+	if group, ok := initial.(errors.ErrorGroup); ok {
+		rest = group.Errors()
+	}
+	for _, other := range others {
+		rest = append(rest, errors.Errors(other)...)
+	}
+	return MultiErrCode{
+		ErrCode: initial,
+		rest:    rest,
+	}
+}
+
+var _ ErrorCode = (*MultiErrCode)(nil)         // assert implements interface
+var _ HasClientData = (*MultiErrCode)(nil)     // assert implements interface
+var _ Causer = (*MultiErrCode)(nil)            // assert implements interface
+var _ errors.ErrorGroup = (*MultiErrCode)(nil) // assert implements interface
+var _ fmt.Formatter = (*MultiErrCode)(nil)     // assert implements interface
+
+func (e MultiErrCode) Error() string {
+	output := e.ErrCode.Error()
+	for _, item := range e.rest {
+		output += "; " + item.Error()
+	}
+	return output
+}
+
+// Errors fulfills the ErrorGroup interface
+func (e MultiErrCode) Errors() []error {
+	return append([]error{e.ErrCode.(error)}, e.rest...)
+}
+
+// Code fulfills the ErrorCode interface
+func (e MultiErrCode) Code() Code {
+	return e.ErrCode.Code()
+}
+
+// Cause fulfills the Causer interface
+func (e MultiErrCode) Cause() error {
+	return e.ErrCode
+}
+
+// GetClientData fulfills the HasClientData interface
+func (e MultiErrCode) GetClientData() interface{} {
+	return ClientData(e.ErrCode)
+}
+
+// CodeChain resolves an error chain down to a chain of just error codes.
+// Any ErrorGroups found are converted to a MultiErrCode.
+// Passed-over error information is retained using ChainContext.
+// If a code was overridden in the chain, it will show up as a MultiErrCode.
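// Editor's note (illustrative sketch, not part of the vendored file): given some
// err returned by a handler, a caller could resolve it to its codes and serialize
// the result roughly like this:
//
//	if errCode := errcode.CodeChain(err); errCode != nil {
//		status := errCode.Code().HTTPCode()
//		body, _ := json.Marshal(errcode.NewJSONFormat(errCode))
//		// write status and body to the HTTP response
//	}
//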
+func CodeChain(err error) ErrorCode { + var code ErrorCode + currentErr := err + chainErrCode := func(errcode ErrorCode) { + if errcode.(error) != currentErr { + if chained, ok := errcode.(ChainContext); ok { + // Perhaps this is a hack because we should be passing the context to recursive CodeChain calls + chained.Top = currentErr + errcode = chained + } else { + errcode = ChainContext{currentErr, errcode} + } + } + if code == nil { + code = errcode + } else { + code = MultiErrCode{code, []error{code.(error), errcode.(error)}} + } + currentErr = errcode.(error) + } + + for err != nil { + if errcode, ok := err.(ErrorCode); ok { + if code == nil || code.Code() != errcode.Code() { + chainErrCode(errcode) + } + } else if eg, ok := err.(errors.ErrorGroup); ok { + group := []ErrorCode{} + for _, errItem := range eg.Errors() { + if itemCode := CodeChain(errItem); itemCode != nil { + group = append(group, itemCode) + } + } + if len(group) > 0 { + var codeGroup ErrorCode + if len(group) == 1 { + codeGroup = group[0] + } else { + codeGroup = Combine(group[0], group[1:]...) + } + chainErrCode(codeGroup) + } + } + err = errors.Unwrap(err) + } + + return code +} + +// ChainContext is returned by ErrorCodeChain +// to retain the full wrapped error message of the error chain. +// If you annotated an ErrorCode with additional information, it is retained in the Top field. +// The Top field is used for the Error() and Cause() methods. +type ChainContext struct { + Top error + ErrCode ErrorCode +} + +// Code satisfies the ErrorCode interface +func (err ChainContext) Code() Code { + return err.ErrCode.Code() +} + +// Error satisfies the Error interface +func (err ChainContext) Error() string { + return err.Top.Error() +} + +// Cause satisfies the Causer interface +func (err ChainContext) Cause() error { + if wrapped := errors.Unwrap(err.Top); wrapped != nil { + return wrapped + } + return err.ErrCode +} + +// GetClientData satisfies the HasClientData interface +func (err ChainContext) GetClientData() interface{} { + return ClientData(err.ErrCode) +} + +var _ ErrorCode = (*ChainContext)(nil) +var _ HasClientData = (*ChainContext)(nil) +var _ Causer = (*ChainContext)(nil) + +// Format implements the Formatter interface +func (err ChainContext) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", err.ErrCode) + if errors.HasStack(err.ErrCode) { + fmt.Fprintf(s, "%v", err.Top) + } else { + fmt.Fprintf(s, "%+v", err.Top) + } + return + } + if s.Flag('#') { + fmt.Fprintf(s, "ChainContext{Code: %#v, Top: %#v}", err.ErrCode, err.Top) + return + } + fallthrough + case 's': + fmt.Fprintf(s, "Code: %s. Top Error: %s", err.ErrCode.Code().CodeStr(), err.Top) + case 'q': + fmt.Fprintf(s, "Code: %q. 
Top Error: %q", err.ErrCode.Code().CodeStr(), err.Top) + } +} + +// Format implements the Formatter interface +func (e MultiErrCode) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", e.ErrCode) + if errors.HasStack(e.ErrCode) { + for _, nextErr := range e.rest { + fmt.Fprintf(s, "%v", nextErr) + } + } else { + for _, nextErr := range e.rest { + fmt.Fprintf(s, "%+v", nextErr) + } + } + return + } + fallthrough + case 's': + fmt.Fprintf(s, "%s\n", e.ErrCode) + fmt.Fprintf(s, "%s", e.rest) + case 'q': + fmt.Fprintf(s, "%q\n", e.ErrCode) + fmt.Fprintf(s, "%q\n", e.rest) + } +} diff --git a/pkg/error_code/operation.go b/vendor/github.com/pingcap/errcode/operation.go similarity index 90% rename from pkg/error_code/operation.go rename to vendor/github.com/pingcap/errcode/operation.go index 7e796d2ef25..30200272f72 100644 --- a/pkg/error_code/operation.go +++ b/vendor/github.com/pingcap/errcode/operation.go @@ -48,14 +48,19 @@ func (e EmbedOp) GetOperation() string { return e.Op } -// OpErrCode is an ErrorCode with an "Operation" field attached. -// This may be used as a convenience to record the operation information for the error. +// OpErrCode is an ErrorCode with an Operation field attached. +// This can be conveniently constructed with Op() and AddTo() to record the operation information for the error. // However, it isn't required to be used, see the HasOperation documentation for alternatives. type OpErrCode struct { Operation string Err ErrorCode } +// Cause satisfies the Causer interface +func (e OpErrCode) Cause() error { + return e.Err +} + // Error prefixes the operation to the underlying Err Error. func (e OpErrCode) Error() string { return e.Operation + ": " + e.Err.Error() @@ -66,7 +71,7 @@ func (e OpErrCode) GetOperation() string { return e.Operation } -// Code returns the unerlying Code of Err. +// Code returns the underlying Code of Err. func (e OpErrCode) Code() Code { return e.Err.Code() } @@ -79,6 +84,7 @@ func (e OpErrCode) GetClientData() interface{} { var _ ErrorCode = (*OpErrCode)(nil) // assert implements interface var _ HasClientData = (*OpErrCode)(nil) // assert implements interface var _ HasOperation = (*OpErrCode)(nil) // assert implements interface +var _ Causer = (*OpErrCode)(nil) // assert implements interface // AddOp is constructed by Op. It allows method chaining with AddTo. type AddOp func(ErrorCode) OpErrCode @@ -94,7 +100,7 @@ func (addOp AddOp) AddTo(err ErrorCode) OpErrCode { // op := errcode.Op("path.move.x") // if start < obstable && obstacle < end { // return op.AddTo(PathBlocked{start, end, obstacle}) -// } +// } // func Op(operation string) AddOp { return func(err ErrorCode) OpErrCode { diff --git a/vendor/github.com/pingcap/errcode/stack.go b/vendor/github.com/pingcap/errcode/stack.go new file mode 100644 index 00000000000..13343162d60 --- /dev/null +++ b/vendor/github.com/pingcap/errcode/stack.go @@ -0,0 +1,88 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package errcode
+
+import (
+	"github.com/pingcap/errors"
+)
+
+// StackTrace retrieves the errors.StackTrace from the error if it is present.
+// If there is no StackTrace it will return nil.
+//
+// StackTrace looks to see if the error is a StackTracer or if a Causer of the error is a StackTracer.
+// It will return the stack trace from the deepest error it can find.
+func StackTrace(err error) errors.StackTrace {
+	if tracer := errors.GetStackTracer(err); tracer != nil {
+		return tracer.StackTrace()
+	}
+	return nil
+}
+
+// StackCode is an ErrorCode with stack trace information attached.
+// This may be used as a convenience to record the stack trace information for the error.
+// Generally stack traces aren't needed for user errors, but they are provided by NewInternalErr.
+// It's also possible to define your own structures that satisfy the StackTracer interface.
+type StackCode struct {
+	Err      ErrorCode
+	GetStack errors.StackTracer
+}
+
+// StackTrace fulfills the StackTracer interface
+func (e StackCode) StackTrace() errors.StackTrace {
+	return e.GetStack.StackTrace()
+}
+
+// NewStackCode constructs a StackCode, which is an ErrorCode with stack trace information.
+// The second variable is an optional stack position that removes information about the function calls used to construct the stack trace.
+// It defaults to 1 to remove this function call.
+//
+// NewStackCode first looks at the underlying error chain to see if it already has a StackTrace.
+// If so, that StackTrace is used.
+func NewStackCode(err ErrorCode, position ...int) StackCode {
+	stackPosition := 1
+	if len(position) > 0 {
+		stackPosition = position[0]
+	}
+
+	// if there is an existing trace, take that: it should be deeper
+	if tracer := errors.GetStackTracer(err); tracer != nil {
+		return StackCode{Err: err, GetStack: tracer}
+	}
+
+	return StackCode{Err: err, GetStack: errors.NewStack(stackPosition)}
+}
+
+// Cause satisfies the Causer interface
+func (e StackCode) Cause() error {
+	return e.Err
+}
+
+// Error ignores the stack and gives the underlying Err Error.
+func (e StackCode) Error() string {
+	return e.Err.Error()
+}
+
+// Code returns the underlying Code of Err.
+func (e StackCode) Code() Code {
+	return e.Err.Code()
+}
+
+// GetClientData returns the ClientData of the underlying Err.
+func (e StackCode) GetClientData() interface{} {
+	return ClientData(e.Err)
+}
+
+var _ ErrorCode = (*StackCode)(nil)     // assert implements interface
+var _ HasClientData = (*StackCode)(nil) // assert implements interface
+var _ Causer = (*StackCode)(nil)        // assert implements interface
diff --git a/vendor/github.com/pingcap/errors/LICENSE b/vendor/github.com/pingcap/errors/LICENSE
new file mode 100644
index 00000000000..835ba3e755c
--- /dev/null
+++ b/vendor/github.com/pingcap/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pingcap/errors/errors.go b/vendor/github.com/pingcap/errors/errors.go new file mode 100644 index 00000000000..2e1d3f62896 --- /dev/null +++ b/vendor/github.com/pingcap/errors/errors.go @@ -0,0 +1,324 @@ +// Package errors provides simple error handling primitives. +// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. +// +// Adding context to an error +// +// The errors.Annotate function returns a new error that adds context to the +// original error by recording a stack trace at the point Annotate is called, +// and the supplied message. For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Annotate(err, "read failed") +// } +// +// If additional control is required the errors.AddStack and errors.WithMessage +// functions destructure errors.Annotate into its component operations of annotating +// an error with a stack trace and an a message, respectively. +// +// Retrieving the cause of an error +// +// Using errors.Annotate constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Annotate to retrieve the original error +// for inspection. Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error which does not implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// causer interface is not exported by this package, but is considered a part +// of stable public API. +// errors.Unwrap is also available: this will retrieve the next error in the chain. +// +// Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported +// +// %s print the error. If the error has a Cause it will be +// printed recursively +// %v see %s +// %+v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Retrieving the stack trace of an error or wrapper +// +// New, Errorf, Annotate, and Annotatef record a stack trace at the point they are invoked. 
+// This information can be retrieved with the StackTracer interface that returns +// a StackTrace. Where errors.StackTrace is defined as +// +// type StackTrace []Frame +// +// The Frame type represents a call site in the stack trace. Frame supports +// the fmt.Formatter interface that can be used for printing information about +// the stack trace of this error. For example: +// +// if stacked := errors.GetStackTracer(err); stacked != nil { +// for _, f := range stacked.StackTrace() { +// fmt.Printf("%+s:%d", f) +// } +// } +// +// See the documentation for Frame.Format for more details. +// +// errors.Find can be used to search for an error in the error chain. +package errors + +import ( + "fmt" + "io" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// StackTraceAware is an optimization to avoid repetitive traversals of an error chain. +// HasStack checks for this marker first. +// Annotate/Wrap and Annotatef/Wrapf will produce this marker. +type StackTraceAware interface { + HasStack() bool +} + +// HasStack tells whether a StackTracer exists in the error chain +func HasStack(err error) bool { + if errWithStack, ok := err.(StackTraceAware); ok { + return errWithStack.HasStack() + } + return GetStackTracer(err) != nil +} + +// fundamental is an error that has a message and a stack, but no caller. +type fundamental struct { + msg string + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +// +// For most use cases this is deprecated and AddStack should be used (which will ensure just one stack trace). +// However, one may want to use this in some situations, for example to create a 2nd trace across a goroutine. +func WithStack(err error) error { + if err == nil { + return nil + } + + return &withStack{ + err, + callers(), + } +} + +// AddStack is similar to WithStack. +// However, it will first check with HasStack to see if a stack trace already exists in the causer chain before creating another one. +func AddStack(err error) error { + if HasStack(err) { + return err + } + return WithStack(err) +} + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. 
+// +// For most use cases this is deprecated in favor of Annotate. +// Annotate avoids creating duplicate stack traces. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + hasStack := HasStack(err) + err = &withMessage{ + cause: err, + msg: message, + causeHasStack: hasStack, + } + return &withStack{ + err, + callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is call, and the format specifier. +// If err is nil, Wrapf returns nil. +// +// For most use cases this is deprecated in favor of Annotatef. +// Annotatef avoids creating duplicate stack traces. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + hasStack := HasStack(err) + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + causeHasStack: hasStack, + } + return &withStack{ + err, + callers(), + } +} + +// WithMessage annotates err with a new message. +// If err is nil, WithMessage returns nil. +func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + causeHasStack: HasStack(err), + } +} + +type withMessage struct { + cause error + msg string + causeHasStack bool +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } +func (w *withMessage) HasStack() bool { return w.causeHasStack } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} + +// Cause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + cause := Unwrap(err) + if cause == nil { + return err + } + return Cause(cause) +} + +// Unwrap uses causer to return the next error in the chain or nil. +// This goes one-level deeper, whereas Cause goes as far as possible +func Unwrap(err error) error { + type causer interface { + Cause() error + } + if unErr, ok := err.(causer); ok { + return unErr.Cause() + } + return nil +} + +// Find an error in the chain that matches a test function. +// returns nil if no error is found. +func Find(origErr error, test func(error) bool) error { + var foundErr error + WalkDeep(origErr, func(err error) bool { + if test(err) { + foundErr = err + return true + } + return false + }) + return foundErr +} diff --git a/vendor/github.com/pingcap/errors/group.go b/vendor/github.com/pingcap/errors/group.go new file mode 100644 index 00000000000..e5a969ab76f --- /dev/null +++ b/vendor/github.com/pingcap/errors/group.go @@ -0,0 +1,42 @@ +package errors + +// ErrorGroup is an interface for multiple errors that are not a chain. +// This happens for example when executing multiple operations in parallel. +type ErrorGroup interface { + Errors() []error +} + +// Errors uses the ErrorGroup interface to return a slice of errors. +// If the ErrorGroup interface is not implemented it returns an array containing just the given error. 
+func Errors(err error) []error { + if eg, ok := err.(ErrorGroup); ok { + return eg.Errors() + } + return []error{err} +} + +// WalkDeep does a depth-first traversal of all errors. +// Any ErrorGroup is traversed (after going deep). +// The visitor function can return true to end the traversal early +// In that case, WalkDeep will return true, otherwise false. +func WalkDeep(err error, visitor func(err error) bool) bool { + // Go deep + unErr := err + for unErr != nil { + if done := visitor(unErr); done { + return true + } + unErr = Unwrap(unErr) + } + + // Go wide + if group, ok := err.(ErrorGroup); ok { + for _, err := range group.Errors() { + if early := WalkDeep(err, visitor); early { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/pingcap/errors/juju_adaptor.go b/vendor/github.com/pingcap/errors/juju_adaptor.go new file mode 100644 index 00000000000..0b20f57370c --- /dev/null +++ b/vendor/github.com/pingcap/errors/juju_adaptor.go @@ -0,0 +1,79 @@ +package errors + +import ( + "fmt" +) + +// ==================== juju adaptor start ======================== + +// Trace just calls AddStack. +func Trace(err error) error { + return AddStack(err) +} + +// Annotate adds a message and ensures there is a stack trace. +func Annotate(err error, message string) error { + if err == nil { + return nil + } + hasStack := HasStack(err) + err = &withMessage{ + cause: err, + msg: message, + causeHasStack: hasStack, + } + if hasStack { + return err + } + return &withStack{ + err, + callers(), + } +} + +// Annotatef adds a message and ensures there is a stack trace. +func Annotatef(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + hasStack := HasStack(err) + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + causeHasStack: hasStack, + } + if hasStack { + return err + } + return &withStack{ + err, + callers(), + } +} + +// ErrorStack will format a stack trace if it is available, otherwise it will be Error() +// If the error is nil, the empty string is returned +// Note that this just calls fmt.Sprintf("%+v", err) +func ErrorStack(err error) string { + if err == nil { + return "" + } + return fmt.Sprintf("%+v", err) +} + +// NotFoundf represents an error with not found message. +func NotFoundf(format string, args ...interface{}) error { + return Errorf(format+" not found", args...) +} + +// BadRequestf represents an error with bad request message. +func BadRequestf(format string, args ...interface{}) error { + return Errorf(format+" bad request", args...) +} + +// NotSupportedf represents an error with not supported message. +func NotSupportedf(format string, args ...interface{}) error { + return Errorf(format+" not supported", args...) +} + +// ==================== juju adaptor end ======================== diff --git a/vendor/github.com/pingcap/errors/stack.go b/vendor/github.com/pingcap/errors/stack.go new file mode 100644 index 00000000000..bb1e6a84f33 --- /dev/null +++ b/vendor/github.com/pingcap/errors/stack.go @@ -0,0 +1,226 @@ +package errors + +import ( + "bytes" + "fmt" + "io" + "path" + "runtime" + "strconv" + "strings" +) + +// StackTracer retrieves the StackTrace +// Generally you would want to use the GetStackTracer function to do that. +type StackTracer interface { + StackTrace() StackTrace +} + +// GetStackTracer will return the first StackTracer in the causer chain. +// This function is used by AddStack to avoid creating redundant stack traces. 
+// +// You can also use the StackTracer interface on the returned error to get the stack trace. +func GetStackTracer(origErr error) StackTracer { + var stacked StackTracer + WalkDeep(origErr, func(err error) bool { + if stackTracer, ok := err.(StackTracer); ok { + stacked = stackTracer + return true + } + return false + }) + return stacked +} + +// Frame represents a program counter inside a stack frame. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// Format formats the frame according to the fmt.Formatter interface. +// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s function name and path of source file relative to the compile time +// GOPATH separated by \n\t (\n\t) +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + f.format(s, s, verb) +} + +// format allows stack trace printing calls to be made with a bytes.Buffer. +func (f Frame) format(w io.Writer, s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + pc := f.pc() + fn := runtime.FuncForPC(pc) + if fn == nil { + io.WriteString(w, "unknown") + } else { + file, _ := fn.FileLine(pc) + io.WriteString(w, fn.Name()) + io.WriteString(w, "\n\t") + io.WriteString(w, file) + } + default: + io.WriteString(w, path.Base(f.file())) + } + case 'd': + io.WriteString(w, strconv.Itoa(f.line())) + case 'n': + name := runtime.FuncForPC(f.pc()).Name() + io.WriteString(w, funcname(name)) + case 'v': + f.format(w, s, 's') + io.WriteString(w, ":") + f.format(w, s, 'd') + } +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). +type StackTrace []Frame + +// Format formats the stack of Frames according to the fmt.Formatter interface. +// +// %s lists source files for each Frame in the stack +// %v lists the source file and line number for each Frame in the stack +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+v Prints filename, function, and line number for each Frame in the stack. +func (st StackTrace) Format(s fmt.State, verb rune) { + var b bytes.Buffer + switch verb { + case 'v': + switch { + case s.Flag('+'): + b.Grow(len(st) * stackMinLen) + for _, fr := range st { + b.WriteByte('\n') + fr.format(&b, s, verb) + } + case s.Flag('#'): + fmt.Fprintf(&b, "%#v", []Frame(st)) + default: + st.formatSlice(&b, s, verb) + } + case 's': + st.formatSlice(&b, s, verb) + } + io.Copy(s, &b) +} + +// formatSlice will format this StackTrace into the given buffer as a slice of +// Frame, only valid when called with '%s' or '%v'. 
+func (st StackTrace) formatSlice(b *bytes.Buffer, s fmt.State, verb rune) { + b.WriteByte('[') + if len(st) == 0 { + b.WriteByte(']') + return + } + + b.Grow(len(st) * (stackMinLen / 4)) + st[0].format(b, s, verb) + for _, fr := range st[1:] { + b.WriteByte(' ') + fr.format(b, s, verb) + } + b.WriteByte(']') +} + +// stackMinLen is a best-guess at the minimum length of a stack trace. It +// doesn't need to be exact, just give a good enough head start for the buffer +// to avoid the expensive early growth. +const stackMinLen = 96 + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + var b bytes.Buffer + b.Grow(len(*s) * stackMinLen) + for _, pc := range *s { + f := Frame(pc) + b.WriteByte('\n') + f.format(&b, st, 'v') + } + io.Copy(st, &b) + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers() *stack { + return callersSkip(4) +} + +func callersSkip(skip int) *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(skip, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). +func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} + +// NewStack is for library implementers that want to generate a stack trace. +// Normally you should insted use AddStack to get an error with a stack trace. +// +// The result of this function can be turned into a stack trace by calling .StackTrace() +// +// This function takes an argument for the number of stack frames to skip. +// This avoids putting stack generation function calls like this one in the stack trace. +// A value of 0 will give you the line that called NewStack(0) +// A library author wrapping this in their own function will want to use a value of at least 1. +func NewStack(skip int) StackTracer { + return callersSkip(skip + 3) +} diff --git a/vendor/github.com/pingcap/kvproto/pkg/eraftpb/eraftpb.pb.go b/vendor/github.com/pingcap/kvproto/pkg/eraftpb/eraftpb.pb.go index 5d350673510..de7d282d714 100644 --- a/vendor/github.com/pingcap/kvproto/pkg/eraftpb/eraftpb.pb.go +++ b/vendor/github.com/pingcap/kvproto/pkg/eraftpb/eraftpb.pb.go @@ -1,22 +1,6 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: eraftpb.proto -// DO NOT EDIT! - -/* - Package eraftpb is a generated protocol buffer package. 
- - It is generated from these files: - eraftpb.proto - - It has these top-level messages: - Entry - SnapshotMetadata - Snapshot - Message - HardState - ConfState - ConfChange -*/ + package eraftpb import ( @@ -57,7 +41,9 @@ var EntryType_value = map[string]int32{ func (x EntryType) String() string { return proto.EnumName(EntryType_name, int32(x)) } -func (EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptorEraftpb, []int{0} } +func (EntryType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_ebbdfaa85573a86c, []int{0} +} type MessageType int32 @@ -129,7 +115,9 @@ var MessageType_value = map[string]int32{ func (x MessageType) String() string { return proto.EnumName(MessageType_name, int32(x)) } -func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorEraftpb, []int{1} } +func (MessageType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_ebbdfaa85573a86c, []int{1} +} type ConfChangeType int32 @@ -153,7 +141,9 @@ var ConfChangeType_value = map[string]int32{ func (x ConfChangeType) String() string { return proto.EnumName(ConfChangeType_name, int32(x)) } -func (ConfChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorEraftpb, []int{2} } +func (ConfChangeType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_ebbdfaa85573a86c, []int{2} +} type Entry struct { EntryType EntryType `protobuf:"varint,1,opt,name=entry_type,json=entryType,proto3,enum=eraftpb.EntryType" json:"entry_type,omitempty"` @@ -163,13 +153,44 @@ type Entry struct { Context []byte `protobuf:"bytes,6,opt,name=context,proto3" json:"context,omitempty"` // Deprecated! It is kept for backward compatibility. // TODO: remove it in the next major release. - SyncLog bool `protobuf:"varint,5,opt,name=sync_log,json=syncLog,proto3" json:"sync_log,omitempty"` + SyncLog bool `protobuf:"varint,5,opt,name=sync_log,json=syncLog,proto3" json:"sync_log,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_ebbdfaa85573a86c, []int{0} +} +func (m *Entry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Entry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Entry.Merge(dst, src) +} +func (m *Entry) XXX_Size() int { + return m.Size() +} +func (m *Entry) XXX_DiscardUnknown() { + xxx_messageInfo_Entry.DiscardUnknown(m) } -func (m *Entry) Reset() { *m = Entry{} } -func (m *Entry) String() string { return proto.CompactTextString(m) } -func (*Entry) ProtoMessage() {} -func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptorEraftpb, []int{0} } +var xxx_messageInfo_Entry proto.InternalMessageInfo func (m *Entry) GetEntryType() EntryType { if m != nil { @@ -214,15 +235,46 @@ func (m *Entry) GetSyncLog() bool { } type SnapshotMetadata struct { - ConfState *ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state,omitempty"` - Index uint64 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` - Term uint64 
`protobuf:"varint,3,opt,name=term,proto3" json:"term,omitempty"` + ConfState *ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state,omitempty"` + Index uint64 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + Term uint64 `protobuf:"varint,3,opt,name=term,proto3" json:"term,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } +func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*SnapshotMetadata) ProtoMessage() {} +func (*SnapshotMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_ebbdfaa85573a86c, []int{1} +} +func (m *SnapshotMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapshotMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SnapshotMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotMetadata.Merge(dst, src) +} +func (m *SnapshotMetadata) XXX_Size() int { + return m.Size() +} +func (m *SnapshotMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotMetadata.DiscardUnknown(m) } -func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } -func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } -func (*SnapshotMetadata) ProtoMessage() {} -func (*SnapshotMetadata) Descriptor() ([]byte, []int) { return fileDescriptorEraftpb, []int{1} } +var xxx_messageInfo_SnapshotMetadata proto.InternalMessageInfo func (m *SnapshotMetadata) GetConfState() *ConfState { if m != nil { @@ -246,14 +298,45 @@ func (m *SnapshotMetadata) GetTerm() uint64 { } type Snapshot struct { - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - Metadata *SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata,omitempty"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Metadata *SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_ebbdfaa85573a86c, []int{2} +} +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(dst, src) +} +func (m *Snapshot) XXX_Size() int { + return m.Size() +} +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) } -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorEraftpb, []int{2} } +var 
xxx_messageInfo_Snapshot proto.InternalMessageInfo func (m *Snapshot) GetData() []byte { if m != nil { @@ -270,24 +353,55 @@ func (m *Snapshot) GetMetadata() *SnapshotMetadata { } type Message struct { - MsgType MessageType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=eraftpb.MessageType" json:"msg_type,omitempty"` - To uint64 `protobuf:"varint,2,opt,name=to,proto3" json:"to,omitempty"` - From uint64 `protobuf:"varint,3,opt,name=from,proto3" json:"from,omitempty"` - Term uint64 `protobuf:"varint,4,opt,name=term,proto3" json:"term,omitempty"` - LogTerm uint64 `protobuf:"varint,5,opt,name=log_term,json=logTerm,proto3" json:"log_term,omitempty"` - Index uint64 `protobuf:"varint,6,opt,name=index,proto3" json:"index,omitempty"` - Entries []*Entry `protobuf:"bytes,7,rep,name=entries" json:"entries,omitempty"` - Commit uint64 `protobuf:"varint,8,opt,name=commit,proto3" json:"commit,omitempty"` - Snapshot *Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot,omitempty"` - Reject bool `protobuf:"varint,10,opt,name=reject,proto3" json:"reject,omitempty"` - RejectHint uint64 `protobuf:"varint,11,opt,name=reject_hint,json=rejectHint,proto3" json:"reject_hint,omitempty"` - Context []byte `protobuf:"bytes,12,opt,name=context,proto3" json:"context,omitempty"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorEraftpb, []int{3} } + MsgType MessageType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=eraftpb.MessageType" json:"msg_type,omitempty"` + To uint64 `protobuf:"varint,2,opt,name=to,proto3" json:"to,omitempty"` + From uint64 `protobuf:"varint,3,opt,name=from,proto3" json:"from,omitempty"` + Term uint64 `protobuf:"varint,4,opt,name=term,proto3" json:"term,omitempty"` + LogTerm uint64 `protobuf:"varint,5,opt,name=log_term,json=logTerm,proto3" json:"log_term,omitempty"` + Index uint64 `protobuf:"varint,6,opt,name=index,proto3" json:"index,omitempty"` + Entries []*Entry `protobuf:"bytes,7,rep,name=entries" json:"entries,omitempty"` + Commit uint64 `protobuf:"varint,8,opt,name=commit,proto3" json:"commit,omitempty"` + Snapshot *Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot,omitempty"` + Reject bool `protobuf:"varint,10,opt,name=reject,proto3" json:"reject,omitempty"` + RejectHint uint64 `protobuf:"varint,11,opt,name=reject_hint,json=rejectHint,proto3" json:"reject_hint,omitempty"` + Context []byte `protobuf:"bytes,12,opt,name=context,proto3" json:"context,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_ebbdfaa85573a86c, []int{3} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(dst, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) 
XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo func (m *Message) GetMsgType() MessageType { if m != nil { @@ -374,15 +488,46 @@ func (m *Message) GetContext() []byte { } type HardState struct { - Term uint64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` - Vote uint64 `protobuf:"varint,2,opt,name=vote,proto3" json:"vote,omitempty"` - Commit uint64 `protobuf:"varint,3,opt,name=commit,proto3" json:"commit,omitempty"` + Term uint64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + Vote uint64 `protobuf:"varint,2,opt,name=vote,proto3" json:"vote,omitempty"` + Commit uint64 `protobuf:"varint,3,opt,name=commit,proto3" json:"commit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HardState) Reset() { *m = HardState{} } +func (m *HardState) String() string { return proto.CompactTextString(m) } +func (*HardState) ProtoMessage() {} +func (*HardState) Descriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_ebbdfaa85573a86c, []int{4} +} +func (m *HardState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HardState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HardState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *HardState) XXX_Merge(src proto.Message) { + xxx_messageInfo_HardState.Merge(dst, src) +} +func (m *HardState) XXX_Size() int { + return m.Size() +} +func (m *HardState) XXX_DiscardUnknown() { + xxx_messageInfo_HardState.DiscardUnknown(m) } -func (m *HardState) Reset() { *m = HardState{} } -func (m *HardState) String() string { return proto.CompactTextString(m) } -func (*HardState) ProtoMessage() {} -func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorEraftpb, []int{4} } +var xxx_messageInfo_HardState proto.InternalMessageInfo func (m *HardState) GetTerm() uint64 { if m != nil { @@ -406,14 +551,45 @@ func (m *HardState) GetCommit() uint64 { } type ConfState struct { - Nodes []uint64 `protobuf:"varint,1,rep,packed,name=nodes" json:"nodes,omitempty"` - Learners []uint64 `protobuf:"varint,2,rep,packed,name=learners" json:"learners,omitempty"` + Nodes []uint64 `protobuf:"varint,1,rep,packed,name=nodes" json:"nodes,omitempty"` + Learners []uint64 `protobuf:"varint,2,rep,packed,name=learners" json:"learners,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfState) Reset() { *m = ConfState{} } +func (m *ConfState) String() string { return proto.CompactTextString(m) } +func (*ConfState) ProtoMessage() {} +func (*ConfState) Descriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_ebbdfaa85573a86c, []int{5} +} +func (m *ConfState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConfState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ConfState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfState.Merge(dst, src) +} +func (m *ConfState) XXX_Size() int { + return m.Size() +} +func (m *ConfState) XXX_DiscardUnknown() { + 
xxx_messageInfo_ConfState.DiscardUnknown(m) } -func (m *ConfState) Reset() { *m = ConfState{} } -func (m *ConfState) String() string { return proto.CompactTextString(m) } -func (*ConfState) ProtoMessage() {} -func (*ConfState) Descriptor() ([]byte, []int) { return fileDescriptorEraftpb, []int{5} } +var xxx_messageInfo_ConfState proto.InternalMessageInfo func (m *ConfState) GetNodes() []uint64 { if m != nil { @@ -430,16 +606,47 @@ func (m *ConfState) GetLearners() []uint64 { } type ConfChange struct { - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - ChangeType ConfChangeType `protobuf:"varint,2,opt,name=change_type,json=changeType,proto3,enum=eraftpb.ConfChangeType" json:"change_type,omitempty"` - NodeId uint64 `protobuf:"varint,3,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` - Context []byte `protobuf:"bytes,4,opt,name=context,proto3" json:"context,omitempty"` + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + ChangeType ConfChangeType `protobuf:"varint,2,opt,name=change_type,json=changeType,proto3,enum=eraftpb.ConfChangeType" json:"change_type,omitempty"` + NodeId uint64 `protobuf:"varint,3,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + Context []byte `protobuf:"bytes,4,opt,name=context,proto3" json:"context,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfChange) Reset() { *m = ConfChange{} } +func (m *ConfChange) String() string { return proto.CompactTextString(m) } +func (*ConfChange) ProtoMessage() {} +func (*ConfChange) Descriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_ebbdfaa85573a86c, []int{6} +} +func (m *ConfChange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConfChange.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ConfChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfChange.Merge(dst, src) +} +func (m *ConfChange) XXX_Size() int { + return m.Size() +} +func (m *ConfChange) XXX_DiscardUnknown() { + xxx_messageInfo_ConfChange.DiscardUnknown(m) } -func (m *ConfChange) Reset() { *m = ConfChange{} } -func (m *ConfChange) String() string { return proto.CompactTextString(m) } -func (*ConfChange) ProtoMessage() {} -func (*ConfChange) Descriptor() ([]byte, []int) { return fileDescriptorEraftpb, []int{6} } +var xxx_messageInfo_ConfChange proto.InternalMessageInfo func (m *ConfChange) GetId() uint64 { if m != nil { @@ -533,6 +740,9 @@ func (m *Entry) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintEraftpb(dAtA, i, uint64(len(m.Context))) i += copy(dAtA[i:], m.Context) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -571,6 +781,9 @@ func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintEraftpb(dAtA, i, uint64(m.Term)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -605,6 +818,9 @@ func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { } i += n2 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -701,6 +917,9 @@ func (m *Message) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintEraftpb(dAtA, i, uint64(len(m.Context))) i += copy(dAtA[i:], 
m.Context) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -734,6 +953,9 @@ func (m *HardState) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintEraftpb(dAtA, i, uint64(m.Commit)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -786,6 +1008,9 @@ func (m *ConfState) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintEraftpb(dAtA, i, uint64(j6)) i += copy(dAtA[i:], dAtA7[:j6]) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -825,27 +1050,12 @@ func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintEraftpb(dAtA, i, uint64(len(m.Context))) i += copy(dAtA[i:], m.Context) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } -func encodeFixed64Eraftpb(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Eraftpb(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintEraftpb(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -856,6 +1066,9 @@ func encodeVarintEraftpb(dAtA []byte, offset int, v uint64) int { return offset + 1 } func (m *Entry) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.EntryType != 0 { @@ -878,10 +1091,16 @@ func (m *Entry) Size() (n int) { if l > 0 { n += 1 + l + sovEraftpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *SnapshotMetadata) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ConfState != nil { @@ -894,10 +1113,16 @@ func (m *SnapshotMetadata) Size() (n int) { if m.Term != 0 { n += 1 + sovEraftpb(uint64(m.Term)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *Snapshot) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Data) @@ -908,10 +1133,16 @@ func (m *Snapshot) Size() (n int) { l = m.Metadata.Size() n += 1 + l + sovEraftpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *Message) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MsgType != 0 { @@ -955,10 +1186,16 @@ func (m *Message) Size() (n int) { if l > 0 { n += 1 + l + sovEraftpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *HardState) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Term != 0 { @@ -970,10 +1207,16 @@ func (m *HardState) Size() (n int) { if m.Commit != 0 { n += 1 + sovEraftpb(uint64(m.Commit)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ConfState) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Nodes) > 0 { @@ -990,10 +1233,16 @@ func (m *ConfState) Size() (n int) { } n += 1 + sovEraftpb(uint64(l)) + l } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ConfChange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Id != 0 { @@ -1009,6 +1258,9 @@ func (m 
*ConfChange) Size() (n int) { if l > 0 { n += 1 + l + sovEraftpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -1205,6 +1457,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1326,6 +1579,7 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1440,6 +1694,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1757,6 +2012,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1864,6 +2120,7 @@ func (m *HardState) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1903,7 +2160,24 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType == 2 { + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) + } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { @@ -1944,7 +2218,11 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { } m.Nodes = append(m.Nodes, v) } - } else if wireType == 0 { + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + case 2: + if wireType == 0 { var v uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { @@ -1960,12 +2238,8 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { break } } - m.Nodes = append(m.Nodes, v) - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) - } - case 2: - if wireType == 2 { + m.Learners = append(m.Learners, v) + } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { @@ -2006,23 +2280,6 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { } m.Learners = append(m.Learners, v) } - } else if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEraftpb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Learners = append(m.Learners, v) } else { return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType) } @@ -2038,6 +2295,7 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -2176,6 +2434,7 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -2290,9 +2549,9 @@ var ( ErrIntOverflowEraftpb = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("eraftpb.proto", fileDescriptorEraftpb) } +func init() { proto.RegisterFile("eraftpb.proto", fileDescriptor_eraftpb_ebbdfaa85573a86c) } -var fileDescriptorEraftpb = []byte{ +var fileDescriptor_eraftpb_ebbdfaa85573a86c = []byte{ // 821 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x55, 0xcd, 0x6e, 0xe4, 0x44, 0x10, 0x8e, 0x3d, 0x3f, 0xf6, 0x94, 0x93, 0x49, 0xa7, 0x08, 0xbb, 0xce, 0x4a, 0x84, 0xd1, 0x9c, diff --git a/vendor/github.com/pingcap/kvproto/pkg/metapb/metapb.pb.go b/vendor/github.com/pingcap/kvproto/pkg/metapb/metapb.pb.go index 3479ceed135..9c855f14749 100644 --- a/vendor/github.com/pingcap/kvproto/pkg/metapb/metapb.pb.go +++ b/vendor/github.com/pingcap/kvproto/pkg/metapb/metapb.pb.go @@ -1,21 +1,6 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: metapb.proto -// DO NOT EDIT! - -/* - Package metapb is a generated protocol buffer package. - - It is generated from these files: - metapb.proto - - It has these top-level messages: - Cluster - StoreLabel - Store - RegionEpoch - Region - Peer -*/ + package metapb import ( @@ -24,6 +9,8 @@ import ( "math" proto "github.com/golang/protobuf/proto" + + _ "github.com/gogo/protobuf/gogoproto" ) // Reference imports to suppress errors if they are not otherwise used. @@ -59,19 +46,52 @@ var StoreState_value = map[string]int32{ func (x StoreState) String() string { return proto.EnumName(StoreState_name, int32(x)) } -func (StoreState) EnumDescriptor() ([]byte, []int) { return fileDescriptorMetapb, []int{0} } +func (StoreState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metapb_fa106af9a8e7a522, []int{0} +} type Cluster struct { Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` // max peer count for a region. // pd will do the auto-balance if region peer count mismatches. 
- MaxPeerCount uint32 `protobuf:"varint,2,opt,name=max_peer_count,json=maxPeerCount,proto3" json:"max_peer_count,omitempty"` + MaxPeerCount uint32 `protobuf:"varint,2,opt,name=max_peer_count,json=maxPeerCount,proto3" json:"max_peer_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Cluster) Reset() { *m = Cluster{} } -func (m *Cluster) String() string { return proto.CompactTextString(m) } -func (*Cluster) ProtoMessage() {} -func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptorMetapb, []int{0} } +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { + return fileDescriptor_metapb_fa106af9a8e7a522, []int{0} +} +func (m *Cluster) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Cluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cluster.Merge(dst, src) +} +func (m *Cluster) XXX_Size() int { + return m.Size() +} +func (m *Cluster) XXX_DiscardUnknown() { + xxx_messageInfo_Cluster.DiscardUnknown(m) +} + +var xxx_messageInfo_Cluster proto.InternalMessageInfo func (m *Cluster) GetId() uint64 { if m != nil { @@ -89,14 +109,45 @@ func (m *Cluster) GetMaxPeerCount() uint32 { // Case insensitive key/value for replica constraints. type StoreLabel struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StoreLabel) Reset() { *m = StoreLabel{} } +func (m *StoreLabel) String() string { return proto.CompactTextString(m) } +func (*StoreLabel) ProtoMessage() {} +func (*StoreLabel) Descriptor() ([]byte, []int) { + return fileDescriptor_metapb_fa106af9a8e7a522, []int{1} +} +func (m *StoreLabel) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreLabel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreLabel.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *StoreLabel) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreLabel.Merge(dst, src) +} +func (m *StoreLabel) XXX_Size() int { + return m.Size() +} +func (m *StoreLabel) XXX_DiscardUnknown() { + xxx_messageInfo_StoreLabel.DiscardUnknown(m) } -func (m *StoreLabel) Reset() { *m = StoreLabel{} } -func (m *StoreLabel) String() string { return proto.CompactTextString(m) } -func (*StoreLabel) ProtoMessage() {} -func (*StoreLabel) Descriptor() ([]byte, []int) { return fileDescriptorMetapb, []int{1} } +var xxx_messageInfo_StoreLabel proto.InternalMessageInfo func (m *StoreLabel) GetKey() string { if m != nil { @@ -113,17 +164,48 @@ func (m *StoreLabel) GetValue() string { } type Store struct { - Id uint64 
`protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - State StoreState `protobuf:"varint,3,opt,name=state,proto3,enum=metapb.StoreState" json:"state,omitempty"` - Labels []*StoreLabel `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty"` - Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + State StoreState `protobuf:"varint,3,opt,name=state,proto3,enum=metapb.StoreState" json:"state,omitempty"` + Labels []*StoreLabel `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty"` + Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Store) Reset() { *m = Store{} } -func (m *Store) String() string { return proto.CompactTextString(m) } -func (*Store) ProtoMessage() {} -func (*Store) Descriptor() ([]byte, []int) { return fileDescriptorMetapb, []int{2} } +func (m *Store) Reset() { *m = Store{} } +func (m *Store) String() string { return proto.CompactTextString(m) } +func (*Store) ProtoMessage() {} +func (*Store) Descriptor() ([]byte, []int) { + return fileDescriptor_metapb_fa106af9a8e7a522, []int{2} +} +func (m *Store) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Store) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Store.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Store) XXX_Merge(src proto.Message) { + xxx_messageInfo_Store.Merge(dst, src) +} +func (m *Store) XXX_Size() int { + return m.Size() +} +func (m *Store) XXX_DiscardUnknown() { + xxx_messageInfo_Store.DiscardUnknown(m) +} + +var xxx_messageInfo_Store proto.InternalMessageInfo func (m *Store) GetId() uint64 { if m != nil { @@ -164,13 +246,44 @@ type RegionEpoch struct { // Conf change version, auto increment when add or remove peer ConfVer uint64 `protobuf:"varint,1,opt,name=conf_ver,json=confVer,proto3" json:"conf_ver,omitempty"` // Region version, auto increment when split or merge - Version uint64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + Version uint64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegionEpoch) Reset() { *m = RegionEpoch{} } +func (m *RegionEpoch) String() string { return proto.CompactTextString(m) } +func (*RegionEpoch) ProtoMessage() {} +func (*RegionEpoch) Descriptor() ([]byte, []int) { + return fileDescriptor_metapb_fa106af9a8e7a522, []int{3} +} +func (m *RegionEpoch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RegionEpoch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RegionEpoch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RegionEpoch) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegionEpoch.Merge(dst, src) +} +func (m *RegionEpoch) XXX_Size() int { + return m.Size() +} +func (m 
*RegionEpoch) XXX_DiscardUnknown() { + xxx_messageInfo_RegionEpoch.DiscardUnknown(m) } -func (m *RegionEpoch) Reset() { *m = RegionEpoch{} } -func (m *RegionEpoch) String() string { return proto.CompactTextString(m) } -func (*RegionEpoch) ProtoMessage() {} -func (*RegionEpoch) Descriptor() ([]byte, []int) { return fileDescriptorMetapb, []int{3} } +var xxx_messageInfo_RegionEpoch proto.InternalMessageInfo func (m *RegionEpoch) GetConfVer() uint64 { if m != nil { @@ -189,16 +302,47 @@ func (m *RegionEpoch) GetVersion() uint64 { type Region struct { Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` // Region key range [start_key, end_key). - StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` - EndKey []byte `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` - RegionEpoch *RegionEpoch `protobuf:"bytes,4,opt,name=region_epoch,json=regionEpoch" json:"region_epoch,omitempty"` - Peers []*Peer `protobuf:"bytes,5,rep,name=peers" json:"peers,omitempty"` + StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + EndKey []byte `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` + RegionEpoch *RegionEpoch `protobuf:"bytes,4,opt,name=region_epoch,json=regionEpoch" json:"region_epoch,omitempty"` + Peers []*Peer `protobuf:"bytes,5,rep,name=peers" json:"peers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Region) Reset() { *m = Region{} } +func (m *Region) String() string { return proto.CompactTextString(m) } +func (*Region) ProtoMessage() {} +func (*Region) Descriptor() ([]byte, []int) { + return fileDescriptor_metapb_fa106af9a8e7a522, []int{4} +} +func (m *Region) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Region) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Region.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Region) XXX_Merge(src proto.Message) { + xxx_messageInfo_Region.Merge(dst, src) +} +func (m *Region) XXX_Size() int { + return m.Size() +} +func (m *Region) XXX_DiscardUnknown() { + xxx_messageInfo_Region.DiscardUnknown(m) } -func (m *Region) Reset() { *m = Region{} } -func (m *Region) String() string { return proto.CompactTextString(m) } -func (*Region) ProtoMessage() {} -func (*Region) Descriptor() ([]byte, []int) { return fileDescriptorMetapb, []int{4} } +var xxx_messageInfo_Region proto.InternalMessageInfo func (m *Region) GetId() uint64 { if m != nil { @@ -236,15 +380,46 @@ func (m *Region) GetPeers() []*Peer { } type Peer struct { - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - StoreId uint64 `protobuf:"varint,2,opt,name=store_id,json=storeId,proto3" json:"store_id,omitempty"` - IsLearner bool `protobuf:"varint,3,opt,name=is_learner,json=isLearner,proto3" json:"is_learner,omitempty"` + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + StoreId uint64 `protobuf:"varint,2,opt,name=store_id,json=storeId,proto3" json:"store_id,omitempty"` + IsLearner bool `protobuf:"varint,3,opt,name=is_learner,json=isLearner,proto3" json:"is_learner,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Peer) 
Reset() { *m = Peer{} } +func (m *Peer) String() string { return proto.CompactTextString(m) } +func (*Peer) ProtoMessage() {} +func (*Peer) Descriptor() ([]byte, []int) { + return fileDescriptor_metapb_fa106af9a8e7a522, []int{5} +} +func (m *Peer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Peer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Peer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Peer.Merge(dst, src) +} +func (m *Peer) XXX_Size() int { + return m.Size() +} +func (m *Peer) XXX_DiscardUnknown() { + xxx_messageInfo_Peer.DiscardUnknown(m) } -func (m *Peer) Reset() { *m = Peer{} } -func (m *Peer) String() string { return proto.CompactTextString(m) } -func (*Peer) ProtoMessage() {} -func (*Peer) Descriptor() ([]byte, []int) { return fileDescriptorMetapb, []int{5} } +var xxx_messageInfo_Peer proto.InternalMessageInfo func (m *Peer) GetId() uint64 { if m != nil { @@ -301,6 +476,9 @@ func (m *Cluster) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintMetapb(dAtA, i, uint64(m.MaxPeerCount)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -331,6 +509,9 @@ func (m *StoreLabel) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintMetapb(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -383,6 +564,9 @@ func (m *Store) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintMetapb(dAtA, i, uint64(len(m.Version))) i += copy(dAtA[i:], m.Version) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -411,6 +595,9 @@ func (m *RegionEpoch) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintMetapb(dAtA, i, uint64(m.Version)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -468,6 +655,9 @@ func (m *Region) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -506,27 +696,12 @@ func (m *Peer) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } -func encodeFixed64Metapb(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Metapb(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintMetapb(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -537,6 +712,9 @@ func encodeVarintMetapb(dAtA []byte, offset int, v uint64) int { return offset + 1 } func (m *Cluster) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Id != 0 { @@ -545,10 +723,16 @@ func (m *Cluster) Size() (n int) { if m.MaxPeerCount != 0 { n += 1 + sovMetapb(uint64(m.MaxPeerCount)) } + if m.XXX_unrecognized != nil { + n += 
len(m.XXX_unrecognized) + } return n } func (m *StoreLabel) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Key) @@ -559,10 +743,16 @@ func (m *StoreLabel) Size() (n int) { if l > 0 { n += 1 + l + sovMetapb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *Store) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Id != 0 { @@ -585,10 +775,16 @@ func (m *Store) Size() (n int) { if l > 0 { n += 1 + l + sovMetapb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *RegionEpoch) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ConfVer != 0 { @@ -597,10 +793,16 @@ func (m *RegionEpoch) Size() (n int) { if m.Version != 0 { n += 1 + sovMetapb(uint64(m.Version)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *Region) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Id != 0 { @@ -624,10 +826,16 @@ func (m *Region) Size() (n int) { n += 1 + l + sovMetapb(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *Peer) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Id != 0 { @@ -639,6 +847,9 @@ func (m *Peer) Size() (n int) { if m.IsLearner { n += 2 } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -734,6 +945,7 @@ func (m *Cluster) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -842,6 +1054,7 @@ func (m *StoreLabel) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1019,6 +1232,7 @@ func (m *Store) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1107,6 +1321,7 @@ func (m *RegionEpoch) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1302,6 +1517,7 @@ func (m *Region) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1410,6 +1626,7 @@ func (m *Peer) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -1524,9 +1741,9 @@ var ( ErrIntOverflowMetapb = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("metapb.proto", fileDescriptorMetapb) } +func init() { proto.RegisterFile("metapb.proto", fileDescriptor_metapb_fa106af9a8e7a522) } -var fileDescriptorMetapb = []byte{ +var fileDescriptor_metapb_fa106af9a8e7a522 = []byte{ // 479 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x52, 0xc1, 0x8e, 0xd3, 0x3c, 0x10, 0x5e, 0xa7, 0x4d, 0xda, 0x4e, 0xbb, 0x55, 0xe5, 0x7f, 0xa5, 0x3f, 0x0b, 0xa2, 0xaa, 0x22, diff --git a/vendor/github.com/pingcap/kvproto/pkg/pdpb/pdpb.pb.go b/vendor/github.com/pingcap/kvproto/pkg/pdpb/pdpb.pb.go index e0a6dce3dca..284b796de06 100644 --- a/vendor/github.com/pingcap/kvproto/pkg/pdpb/pdpb.pb.go +++ b/vendor/github.com/pingcap/kvproto/pkg/pdpb/pdpb.pb.go @@ -1,69 +1,6 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: pdpb.proto -// DO NOT EDIT! - -/* - Package pdpb is a generated protocol buffer package. - - It is generated from these files: - pdpb.proto - - It has these top-level messages: - RequestHeader - ResponseHeader - Error - TsoRequest - Timestamp - TsoResponse - BootstrapRequest - BootstrapResponse - IsBootstrappedRequest - IsBootstrappedResponse - AllocIDRequest - AllocIDResponse - GetStoreRequest - GetStoreResponse - PutStoreRequest - PutStoreResponse - GetAllStoresRequest - GetAllStoresResponse - GetRegionRequest - GetRegionResponse - GetRegionByIDRequest - GetClusterConfigRequest - GetClusterConfigResponse - PutClusterConfigRequest - PutClusterConfigResponse - Member - GetMembersRequest - GetMembersResponse - PeerStats - RegionHeartbeatRequest - ChangePeer - TransferLeader - Merge - SplitRegion - RegionHeartbeatResponse - AskSplitRequest - AskSplitResponse - ReportSplitRequest - ReportSplitResponse - AskBatchSplitRequest - SplitID - AskBatchSplitResponse - ReportBatchSplitRequest - ReportBatchSplitResponse - TimeInterval - StoreStats - StoreHeartbeatRequest - StoreHeartbeatResponse - ScatterRegionRequest - ScatterRegionResponse - GetGCSafePointRequest - GetGCSafePointResponse - UpdateGCSafePointRequest - UpdateGCSafePointResponse -*/ + package pdpb import ( @@ -73,10 +10,12 @@ import ( proto "github.com/golang/protobuf/proto" - metapb "github.com/pingcap/kvproto/pkg/metapb" + _ "github.com/gogo/protobuf/gogoproto" eraftpb "github.com/pingcap/kvproto/pkg/eraftpb" + metapb "github.com/pingcap/kvproto/pkg/metapb" + context "golang.org/x/net/context" grpc "google.golang.org/grpc" @@ -124,7 +63,9 @@ var ErrorType_value = map[string]int32{ func (x ErrorType) String() string { return proto.EnumName(ErrorType_name, int32(x)) } -func (ErrorType) EnumDescriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{0} } +func (ErrorType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{0} +} type CheckPolicy int32 @@ -145,17 +86,50 @@ var CheckPolicy_value = map[string]int32{ func (x CheckPolicy) String() string { return proto.EnumName(CheckPolicy_name, int32(x)) } -func (CheckPolicy) EnumDescriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{1} } +func (CheckPolicy) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{1} +} type RequestHeader struct { // cluster_id is the ID of the cluster which be sent to. 
- ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestHeader) Reset() { *m = RequestHeader{} } +func (m *RequestHeader) String() string { return proto.CompactTextString(m) } +func (*RequestHeader) ProtoMessage() {} +func (*RequestHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{0} +} +func (m *RequestHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RequestHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestHeader.Merge(dst, src) +} +func (m *RequestHeader) XXX_Size() int { + return m.Size() +} +func (m *RequestHeader) XXX_DiscardUnknown() { + xxx_messageInfo_RequestHeader.DiscardUnknown(m) } -func (m *RequestHeader) Reset() { *m = RequestHeader{} } -func (m *RequestHeader) String() string { return proto.CompactTextString(m) } -func (*RequestHeader) ProtoMessage() {} -func (*RequestHeader) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{0} } +var xxx_messageInfo_RequestHeader proto.InternalMessageInfo func (m *RequestHeader) GetClusterId() uint64 { if m != nil { @@ -166,14 +140,45 @@ func (m *RequestHeader) GetClusterId() uint64 { type ResponseHeader struct { // cluster_id is the ID of the cluster which sent the response. 
- ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Error *Error `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` + ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Error *Error `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } +func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } +func (*ResponseHeader) ProtoMessage() {} +func (*ResponseHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{1} +} +func (m *ResponseHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ResponseHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseHeader.Merge(dst, src) +} +func (m *ResponseHeader) XXX_Size() int { + return m.Size() +} +func (m *ResponseHeader) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseHeader.DiscardUnknown(m) } -func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } -func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } -func (*ResponseHeader) ProtoMessage() {} -func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{1} } +var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo func (m *ResponseHeader) GetClusterId() uint64 { if m != nil { @@ -190,14 +195,45 @@ func (m *ResponseHeader) GetError() *Error { } type Error struct { - Type ErrorType `protobuf:"varint,1,opt,name=type,proto3,enum=pdpb.ErrorType" json:"type,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Type ErrorType `protobuf:"varint,1,opt,name=type,proto3,enum=pdpb.ErrorType" json:"type,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{2} +} +func (m *Error) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Error.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Error) XXX_Merge(src proto.Message) { + xxx_messageInfo_Error.Merge(dst, src) +} +func (m *Error) XXX_Size() int { + return m.Size() +} +func (m *Error) XXX_DiscardUnknown() { + xxx_messageInfo_Error.DiscardUnknown(m) } -func (m *Error) Reset() { *m = Error{} } -func (m *Error) String() string { return proto.CompactTextString(m) } -func (*Error) ProtoMessage() {} -func (*Error) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{2} } +var xxx_messageInfo_Error proto.InternalMessageInfo 
func (m *Error) GetType() ErrorType { if m != nil { @@ -214,14 +250,45 @@ func (m *Error) GetMessage() string { } type TsoRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TsoRequest) Reset() { *m = TsoRequest{} } +func (m *TsoRequest) String() string { return proto.CompactTextString(m) } +func (*TsoRequest) ProtoMessage() {} +func (*TsoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{3} +} +func (m *TsoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TsoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TsoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TsoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TsoRequest.Merge(dst, src) +} +func (m *TsoRequest) XXX_Size() int { + return m.Size() +} +func (m *TsoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TsoRequest.DiscardUnknown(m) } -func (m *TsoRequest) Reset() { *m = TsoRequest{} } -func (m *TsoRequest) String() string { return proto.CompactTextString(m) } -func (*TsoRequest) ProtoMessage() {} -func (*TsoRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{3} } +var xxx_messageInfo_TsoRequest proto.InternalMessageInfo func (m *TsoRequest) GetHeader() *RequestHeader { if m != nil { @@ -238,14 +305,45 @@ func (m *TsoRequest) GetCount() uint32 { } type Timestamp struct { - Physical int64 `protobuf:"varint,1,opt,name=physical,proto3" json:"physical,omitempty"` - Logical int64 `protobuf:"varint,2,opt,name=logical,proto3" json:"logical,omitempty"` + Physical int64 `protobuf:"varint,1,opt,name=physical,proto3" json:"physical,omitempty"` + Logical int64 `protobuf:"varint,2,opt,name=logical,proto3" json:"logical,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{4} +} +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) } -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return 
fileDescriptorPdpb, []int{4} } +var xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *Timestamp) GetPhysical() int64 { if m != nil { @@ -262,15 +360,46 @@ func (m *Timestamp) GetLogical() int64 { } type TsoResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` - Timestamp *Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + Timestamp *Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TsoResponse) Reset() { *m = TsoResponse{} } +func (m *TsoResponse) String() string { return proto.CompactTextString(m) } +func (*TsoResponse) ProtoMessage() {} +func (*TsoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{5} +} +func (m *TsoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TsoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TsoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TsoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TsoResponse.Merge(dst, src) +} +func (m *TsoResponse) XXX_Size() int { + return m.Size() +} +func (m *TsoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TsoResponse.DiscardUnknown(m) } -func (m *TsoResponse) Reset() { *m = TsoResponse{} } -func (m *TsoResponse) String() string { return proto.CompactTextString(m) } -func (*TsoResponse) ProtoMessage() {} -func (*TsoResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{5} } +var xxx_messageInfo_TsoResponse proto.InternalMessageInfo func (m *TsoResponse) GetHeader() *ResponseHeader { if m != nil { @@ -294,15 +423,46 @@ func (m *TsoResponse) GetTimestamp() *Timestamp { } type BootstrapRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Store *metapb.Store `protobuf:"bytes,2,opt,name=store" json:"store,omitempty"` - Region *metapb.Region `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Store *metapb.Store `protobuf:"bytes,2,opt,name=store" json:"store,omitempty"` + Region *metapb.Region `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BootstrapRequest) Reset() { *m = BootstrapRequest{} } +func (m *BootstrapRequest) String() string { return proto.CompactTextString(m) } +func (*BootstrapRequest) ProtoMessage() {} +func (*BootstrapRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{6} +} +func (m *BootstrapRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BootstrapRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BootstrapRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return 
nil, err + } + return b[:n], nil + } +} +func (dst *BootstrapRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BootstrapRequest.Merge(dst, src) +} +func (m *BootstrapRequest) XXX_Size() int { + return m.Size() +} +func (m *BootstrapRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BootstrapRequest.DiscardUnknown(m) } -func (m *BootstrapRequest) Reset() { *m = BootstrapRequest{} } -func (m *BootstrapRequest) String() string { return proto.CompactTextString(m) } -func (*BootstrapRequest) ProtoMessage() {} -func (*BootstrapRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{6} } +var xxx_messageInfo_BootstrapRequest proto.InternalMessageInfo func (m *BootstrapRequest) GetHeader() *RequestHeader { if m != nil { @@ -326,13 +486,44 @@ func (m *BootstrapRequest) GetRegion() *metapb.Region { } type BootstrapResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BootstrapResponse) Reset() { *m = BootstrapResponse{} } +func (m *BootstrapResponse) String() string { return proto.CompactTextString(m) } +func (*BootstrapResponse) ProtoMessage() {} +func (*BootstrapResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{7} +} +func (m *BootstrapResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BootstrapResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BootstrapResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *BootstrapResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BootstrapResponse.Merge(dst, src) +} +func (m *BootstrapResponse) XXX_Size() int { + return m.Size() +} +func (m *BootstrapResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BootstrapResponse.DiscardUnknown(m) } -func (m *BootstrapResponse) Reset() { *m = BootstrapResponse{} } -func (m *BootstrapResponse) String() string { return proto.CompactTextString(m) } -func (*BootstrapResponse) ProtoMessage() {} -func (*BootstrapResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{7} } +var xxx_messageInfo_BootstrapResponse proto.InternalMessageInfo func (m *BootstrapResponse) GetHeader() *ResponseHeader { if m != nil { @@ -342,13 +533,44 @@ func (m *BootstrapResponse) GetHeader() *ResponseHeader { } type IsBootstrappedRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IsBootstrappedRequest) Reset() { *m = IsBootstrappedRequest{} } +func (m *IsBootstrappedRequest) String() string { return proto.CompactTextString(m) } +func (*IsBootstrappedRequest) ProtoMessage() {} +func (*IsBootstrappedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{8} +} +func (m *IsBootstrappedRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IsBootstrappedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_IsBootstrappedRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *IsBootstrappedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_IsBootstrappedRequest.Merge(dst, src) +} +func (m *IsBootstrappedRequest) XXX_Size() int { + return m.Size() +} +func (m *IsBootstrappedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_IsBootstrappedRequest.DiscardUnknown(m) } -func (m *IsBootstrappedRequest) Reset() { *m = IsBootstrappedRequest{} } -func (m *IsBootstrappedRequest) String() string { return proto.CompactTextString(m) } -func (*IsBootstrappedRequest) ProtoMessage() {} -func (*IsBootstrappedRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{8} } +var xxx_messageInfo_IsBootstrappedRequest proto.InternalMessageInfo func (m *IsBootstrappedRequest) GetHeader() *RequestHeader { if m != nil { @@ -358,14 +580,45 @@ func (m *IsBootstrappedRequest) GetHeader() *RequestHeader { } type IsBootstrappedResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Bootstrapped bool `protobuf:"varint,2,opt,name=bootstrapped,proto3" json:"bootstrapped,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Bootstrapped bool `protobuf:"varint,2,opt,name=bootstrapped,proto3" json:"bootstrapped,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IsBootstrappedResponse) Reset() { *m = IsBootstrappedResponse{} } +func (m *IsBootstrappedResponse) String() string { return proto.CompactTextString(m) } +func (*IsBootstrappedResponse) ProtoMessage() {} +func (*IsBootstrappedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{9} +} +func (m *IsBootstrappedResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IsBootstrappedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IsBootstrappedResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *IsBootstrappedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_IsBootstrappedResponse.Merge(dst, src) +} +func (m *IsBootstrappedResponse) XXX_Size() int { + return m.Size() +} +func (m *IsBootstrappedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_IsBootstrappedResponse.DiscardUnknown(m) } -func (m *IsBootstrappedResponse) Reset() { *m = IsBootstrappedResponse{} } -func (m *IsBootstrappedResponse) String() string { return proto.CompactTextString(m) } -func (*IsBootstrappedResponse) ProtoMessage() {} -func (*IsBootstrappedResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{9} } +var xxx_messageInfo_IsBootstrappedResponse proto.InternalMessageInfo func (m *IsBootstrappedResponse) GetHeader() *ResponseHeader { if m != nil { @@ -382,13 +635,44 @@ func (m *IsBootstrappedResponse) GetBootstrapped() bool { } type AllocIDRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocIDRequest) Reset() { *m = AllocIDRequest{} } 
+func (m *AllocIDRequest) String() string { return proto.CompactTextString(m) } +func (*AllocIDRequest) ProtoMessage() {} +func (*AllocIDRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{10} +} +func (m *AllocIDRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AllocIDRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *AllocIDRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocIDRequest.Merge(dst, src) +} +func (m *AllocIDRequest) XXX_Size() int { + return m.Size() +} +func (m *AllocIDRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AllocIDRequest.DiscardUnknown(m) } -func (m *AllocIDRequest) Reset() { *m = AllocIDRequest{} } -func (m *AllocIDRequest) String() string { return proto.CompactTextString(m) } -func (*AllocIDRequest) ProtoMessage() {} -func (*AllocIDRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{10} } +var xxx_messageInfo_AllocIDRequest proto.InternalMessageInfo func (m *AllocIDRequest) GetHeader() *RequestHeader { if m != nil { @@ -398,14 +682,45 @@ func (m *AllocIDRequest) GetHeader() *RequestHeader { } type AllocIDResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocIDResponse) Reset() { *m = AllocIDResponse{} } +func (m *AllocIDResponse) String() string { return proto.CompactTextString(m) } +func (*AllocIDResponse) ProtoMessage() {} +func (*AllocIDResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{11} +} +func (m *AllocIDResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocIDResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AllocIDResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *AllocIDResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocIDResponse.Merge(dst, src) +} +func (m *AllocIDResponse) XXX_Size() int { + return m.Size() +} +func (m *AllocIDResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AllocIDResponse.DiscardUnknown(m) } -func (m *AllocIDResponse) Reset() { *m = AllocIDResponse{} } -func (m *AllocIDResponse) String() string { return proto.CompactTextString(m) } -func (*AllocIDResponse) ProtoMessage() {} -func (*AllocIDResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{11} } +var xxx_messageInfo_AllocIDResponse proto.InternalMessageInfo func (m *AllocIDResponse) GetHeader() *ResponseHeader { if m != nil { @@ -422,14 +737,45 @@ func (m *AllocIDResponse) GetId() uint64 { } type GetStoreRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - StoreId uint64 `protobuf:"varint,2,opt,name=store_id,json=storeId,proto3" json:"store_id,omitempty"` + Header *RequestHeader 
`protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + StoreId uint64 `protobuf:"varint,2,opt,name=store_id,json=storeId,proto3" json:"store_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetStoreRequest) Reset() { *m = GetStoreRequest{} } +func (m *GetStoreRequest) String() string { return proto.CompactTextString(m) } +func (*GetStoreRequest) ProtoMessage() {} +func (*GetStoreRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{12} +} +func (m *GetStoreRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetStoreRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetStoreRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetStoreRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetStoreRequest.Merge(dst, src) +} +func (m *GetStoreRequest) XXX_Size() int { + return m.Size() +} +func (m *GetStoreRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetStoreRequest.DiscardUnknown(m) } -func (m *GetStoreRequest) Reset() { *m = GetStoreRequest{} } -func (m *GetStoreRequest) String() string { return proto.CompactTextString(m) } -func (*GetStoreRequest) ProtoMessage() {} -func (*GetStoreRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{12} } +var xxx_messageInfo_GetStoreRequest proto.InternalMessageInfo func (m *GetStoreRequest) GetHeader() *RequestHeader { if m != nil { @@ -446,14 +792,45 @@ func (m *GetStoreRequest) GetStoreId() uint64 { } type GetStoreResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Store *metapb.Store `protobuf:"bytes,2,opt,name=store" json:"store,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Store *metapb.Store `protobuf:"bytes,2,opt,name=store" json:"store,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetStoreResponse) Reset() { *m = GetStoreResponse{} } +func (m *GetStoreResponse) String() string { return proto.CompactTextString(m) } +func (*GetStoreResponse) ProtoMessage() {} +func (*GetStoreResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{13} +} +func (m *GetStoreResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetStoreResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetStoreResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetStoreResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetStoreResponse.Merge(dst, src) +} +func (m *GetStoreResponse) XXX_Size() int { + return m.Size() +} +func (m *GetStoreResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetStoreResponse.DiscardUnknown(m) } -func (m *GetStoreResponse) Reset() { *m = GetStoreResponse{} } -func (m *GetStoreResponse) String() string { return proto.CompactTextString(m) } -func (*GetStoreResponse) ProtoMessage() {} -func (*GetStoreResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{13} } +var xxx_messageInfo_GetStoreResponse proto.InternalMessageInfo 
func (m *GetStoreResponse) GetHeader() *ResponseHeader { if m != nil { @@ -470,14 +847,45 @@ func (m *GetStoreResponse) GetStore() *metapb.Store { } type PutStoreRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Store *metapb.Store `protobuf:"bytes,2,opt,name=store" json:"store,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Store *metapb.Store `protobuf:"bytes,2,opt,name=store" json:"store,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutStoreRequest) Reset() { *m = PutStoreRequest{} } +func (m *PutStoreRequest) String() string { return proto.CompactTextString(m) } +func (*PutStoreRequest) ProtoMessage() {} +func (*PutStoreRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{14} +} +func (m *PutStoreRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PutStoreRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PutStoreRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PutStoreRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutStoreRequest.Merge(dst, src) +} +func (m *PutStoreRequest) XXX_Size() int { + return m.Size() +} +func (m *PutStoreRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PutStoreRequest.DiscardUnknown(m) } -func (m *PutStoreRequest) Reset() { *m = PutStoreRequest{} } -func (m *PutStoreRequest) String() string { return proto.CompactTextString(m) } -func (*PutStoreRequest) ProtoMessage() {} -func (*PutStoreRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{14} } +var xxx_messageInfo_PutStoreRequest proto.InternalMessageInfo func (m *PutStoreRequest) GetHeader() *RequestHeader { if m != nil { @@ -494,13 +902,44 @@ func (m *PutStoreRequest) GetStore() *metapb.Store { } type PutStoreResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutStoreResponse) Reset() { *m = PutStoreResponse{} } +func (m *PutStoreResponse) String() string { return proto.CompactTextString(m) } +func (*PutStoreResponse) ProtoMessage() {} +func (*PutStoreResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{15} +} +func (m *PutStoreResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PutStoreResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PutStoreResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PutStoreResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutStoreResponse.Merge(dst, src) +} +func (m *PutStoreResponse) XXX_Size() int { + return m.Size() +} +func (m *PutStoreResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PutStoreResponse.DiscardUnknown(m) } -func (m *PutStoreResponse) Reset() { *m = PutStoreResponse{} } -func (m *PutStoreResponse) String() string { return proto.CompactTextString(m) } -func 
(*PutStoreResponse) ProtoMessage() {} -func (*PutStoreResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{15} } +var xxx_messageInfo_PutStoreResponse proto.InternalMessageInfo func (m *PutStoreResponse) GetHeader() *ResponseHeader { if m != nil { @@ -510,13 +949,44 @@ func (m *PutStoreResponse) GetHeader() *ResponseHeader { } type GetAllStoresRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAllStoresRequest) Reset() { *m = GetAllStoresRequest{} } +func (m *GetAllStoresRequest) String() string { return proto.CompactTextString(m) } +func (*GetAllStoresRequest) ProtoMessage() {} +func (*GetAllStoresRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{16} +} +func (m *GetAllStoresRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetAllStoresRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetAllStoresRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetAllStoresRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAllStoresRequest.Merge(dst, src) +} +func (m *GetAllStoresRequest) XXX_Size() int { + return m.Size() +} +func (m *GetAllStoresRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetAllStoresRequest.DiscardUnknown(m) } -func (m *GetAllStoresRequest) Reset() { *m = GetAllStoresRequest{} } -func (m *GetAllStoresRequest) String() string { return proto.CompactTextString(m) } -func (*GetAllStoresRequest) ProtoMessage() {} -func (*GetAllStoresRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{16} } +var xxx_messageInfo_GetAllStoresRequest proto.InternalMessageInfo func (m *GetAllStoresRequest) GetHeader() *RequestHeader { if m != nil { @@ -526,14 +996,45 @@ func (m *GetAllStoresRequest) GetHeader() *RequestHeader { } type GetAllStoresResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Stores []*metapb.Store `protobuf:"bytes,2,rep,name=stores" json:"stores,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Stores []*metapb.Store `protobuf:"bytes,2,rep,name=stores" json:"stores,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAllStoresResponse) Reset() { *m = GetAllStoresResponse{} } +func (m *GetAllStoresResponse) String() string { return proto.CompactTextString(m) } +func (*GetAllStoresResponse) ProtoMessage() {} +func (*GetAllStoresResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{17} +} +func (m *GetAllStoresResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetAllStoresResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetAllStoresResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetAllStoresResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_GetAllStoresResponse.Merge(dst, src) +} +func (m *GetAllStoresResponse) XXX_Size() int { + return m.Size() +} +func (m *GetAllStoresResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetAllStoresResponse.DiscardUnknown(m) } -func (m *GetAllStoresResponse) Reset() { *m = GetAllStoresResponse{} } -func (m *GetAllStoresResponse) String() string { return proto.CompactTextString(m) } -func (*GetAllStoresResponse) ProtoMessage() {} -func (*GetAllStoresResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{17} } +var xxx_messageInfo_GetAllStoresResponse proto.InternalMessageInfo func (m *GetAllStoresResponse) GetHeader() *ResponseHeader { if m != nil { @@ -550,14 +1051,45 @@ func (m *GetAllStoresResponse) GetStores() []*metapb.Store { } type GetRegionRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - RegionKey []byte `protobuf:"bytes,2,opt,name=region_key,json=regionKey,proto3" json:"region_key,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + RegionKey []byte `protobuf:"bytes,2,opt,name=region_key,json=regionKey,proto3" json:"region_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRegionRequest) Reset() { *m = GetRegionRequest{} } +func (m *GetRegionRequest) String() string { return proto.CompactTextString(m) } +func (*GetRegionRequest) ProtoMessage() {} +func (*GetRegionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{18} +} +func (m *GetRegionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetRegionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetRegionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetRegionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRegionRequest.Merge(dst, src) +} +func (m *GetRegionRequest) XXX_Size() int { + return m.Size() +} +func (m *GetRegionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRegionRequest.DiscardUnknown(m) } -func (m *GetRegionRequest) Reset() { *m = GetRegionRequest{} } -func (m *GetRegionRequest) String() string { return proto.CompactTextString(m) } -func (*GetRegionRequest) ProtoMessage() {} -func (*GetRegionRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{18} } +var xxx_messageInfo_GetRegionRequest proto.InternalMessageInfo func (m *GetRegionRequest) GetHeader() *RequestHeader { if m != nil { @@ -574,15 +1106,46 @@ func (m *GetRegionRequest) GetRegionKey() []byte { } type GetRegionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Region *metapb.Region `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"` - Leader *metapb.Peer `protobuf:"bytes,3,opt,name=leader" json:"leader,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Region *metapb.Region `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"` + Leader *metapb.Peer `protobuf:"bytes,3,opt,name=leader" json:"leader,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRegionResponse) Reset() { *m = GetRegionResponse{} } +func (m *GetRegionResponse) String() 
string { return proto.CompactTextString(m) } +func (*GetRegionResponse) ProtoMessage() {} +func (*GetRegionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{19} +} +func (m *GetRegionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetRegionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetRegionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetRegionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRegionResponse.Merge(dst, src) +} +func (m *GetRegionResponse) XXX_Size() int { + return m.Size() +} +func (m *GetRegionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetRegionResponse.DiscardUnknown(m) } -func (m *GetRegionResponse) Reset() { *m = GetRegionResponse{} } -func (m *GetRegionResponse) String() string { return proto.CompactTextString(m) } -func (*GetRegionResponse) ProtoMessage() {} -func (*GetRegionResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{19} } +var xxx_messageInfo_GetRegionResponse proto.InternalMessageInfo func (m *GetRegionResponse) GetHeader() *ResponseHeader { if m != nil { @@ -606,14 +1169,45 @@ func (m *GetRegionResponse) GetLeader() *metapb.Peer { } type GetRegionByIDRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - RegionId uint64 `protobuf:"varint,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + RegionId uint64 `protobuf:"varint,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRegionByIDRequest) Reset() { *m = GetRegionByIDRequest{} } +func (m *GetRegionByIDRequest) String() string { return proto.CompactTextString(m) } +func (*GetRegionByIDRequest) ProtoMessage() {} +func (*GetRegionByIDRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{20} +} +func (m *GetRegionByIDRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetRegionByIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetRegionByIDRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetRegionByIDRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRegionByIDRequest.Merge(dst, src) +} +func (m *GetRegionByIDRequest) XXX_Size() int { + return m.Size() +} +func (m *GetRegionByIDRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRegionByIDRequest.DiscardUnknown(m) } -func (m *GetRegionByIDRequest) Reset() { *m = GetRegionByIDRequest{} } -func (m *GetRegionByIDRequest) String() string { return proto.CompactTextString(m) } -func (*GetRegionByIDRequest) ProtoMessage() {} -func (*GetRegionByIDRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{20} } +var xxx_messageInfo_GetRegionByIDRequest proto.InternalMessageInfo func (m *GetRegionByIDRequest) GetHeader() *RequestHeader { if m != nil { @@ -630,13 +1224,44 @@ func (m *GetRegionByIDRequest) GetRegionId() uint64 { } type GetClusterConfigRequest struct { - Header 
*RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterConfigRequest) Reset() { *m = GetClusterConfigRequest{} } +func (m *GetClusterConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterConfigRequest) ProtoMessage() {} +func (*GetClusterConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{21} +} +func (m *GetClusterConfigRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetClusterConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetClusterConfigRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetClusterConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterConfigRequest.Merge(dst, src) +} +func (m *GetClusterConfigRequest) XXX_Size() int { + return m.Size() +} +func (m *GetClusterConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterConfigRequest.DiscardUnknown(m) } -func (m *GetClusterConfigRequest) Reset() { *m = GetClusterConfigRequest{} } -func (m *GetClusterConfigRequest) String() string { return proto.CompactTextString(m) } -func (*GetClusterConfigRequest) ProtoMessage() {} -func (*GetClusterConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{21} } +var xxx_messageInfo_GetClusterConfigRequest proto.InternalMessageInfo func (m *GetClusterConfigRequest) GetHeader() *RequestHeader { if m != nil { @@ -646,14 +1271,45 @@ func (m *GetClusterConfigRequest) GetHeader() *RequestHeader { } type GetClusterConfigResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Cluster *metapb.Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Cluster *metapb.Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterConfigResponse) Reset() { *m = GetClusterConfigResponse{} } +func (m *GetClusterConfigResponse) String() string { return proto.CompactTextString(m) } +func (*GetClusterConfigResponse) ProtoMessage() {} +func (*GetClusterConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{22} +} +func (m *GetClusterConfigResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetClusterConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetClusterConfigResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetClusterConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterConfigResponse.Merge(dst, src) +} +func (m *GetClusterConfigResponse) XXX_Size() int { + return m.Size() +} +func (m *GetClusterConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterConfigResponse.DiscardUnknown(m) } -func (m *GetClusterConfigResponse) Reset() { *m = 
GetClusterConfigResponse{} } -func (m *GetClusterConfigResponse) String() string { return proto.CompactTextString(m) } -func (*GetClusterConfigResponse) ProtoMessage() {} -func (*GetClusterConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{22} } +var xxx_messageInfo_GetClusterConfigResponse proto.InternalMessageInfo func (m *GetClusterConfigResponse) GetHeader() *ResponseHeader { if m != nil { @@ -670,14 +1326,45 @@ func (m *GetClusterConfigResponse) GetCluster() *metapb.Cluster { } type PutClusterConfigRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Cluster *metapb.Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Cluster *metapb.Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutClusterConfigRequest) Reset() { *m = PutClusterConfigRequest{} } +func (m *PutClusterConfigRequest) String() string { return proto.CompactTextString(m) } +func (*PutClusterConfigRequest) ProtoMessage() {} +func (*PutClusterConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{23} +} +func (m *PutClusterConfigRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PutClusterConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PutClusterConfigRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PutClusterConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutClusterConfigRequest.Merge(dst, src) +} +func (m *PutClusterConfigRequest) XXX_Size() int { + return m.Size() +} +func (m *PutClusterConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PutClusterConfigRequest.DiscardUnknown(m) } -func (m *PutClusterConfigRequest) Reset() { *m = PutClusterConfigRequest{} } -func (m *PutClusterConfigRequest) String() string { return proto.CompactTextString(m) } -func (*PutClusterConfigRequest) ProtoMessage() {} -func (*PutClusterConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{23} } +var xxx_messageInfo_PutClusterConfigRequest proto.InternalMessageInfo func (m *PutClusterConfigRequest) GetHeader() *RequestHeader { if m != nil { @@ -694,13 +1381,44 @@ func (m *PutClusterConfigRequest) GetCluster() *metapb.Cluster { } type PutClusterConfigResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutClusterConfigResponse) Reset() { *m = PutClusterConfigResponse{} } +func (m *PutClusterConfigResponse) String() string { return proto.CompactTextString(m) } +func (*PutClusterConfigResponse) ProtoMessage() {} +func (*PutClusterConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{24} +} +func (m *PutClusterConfigResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PutClusterConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_PutClusterConfigResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PutClusterConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutClusterConfigResponse.Merge(dst, src) +} +func (m *PutClusterConfigResponse) XXX_Size() int { + return m.Size() +} +func (m *PutClusterConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PutClusterConfigResponse.DiscardUnknown(m) } -func (m *PutClusterConfigResponse) Reset() { *m = PutClusterConfigResponse{} } -func (m *PutClusterConfigResponse) String() string { return proto.CompactTextString(m) } -func (*PutClusterConfigResponse) ProtoMessage() {} -func (*PutClusterConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{24} } +var xxx_messageInfo_PutClusterConfigResponse proto.InternalMessageInfo func (m *PutClusterConfigResponse) GetHeader() *ResponseHeader { if m != nil { @@ -713,16 +1431,47 @@ type Member struct { // name is the name of the PD member. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // member_id is the unique id of the PD member. - MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"` - PeerUrls []string `protobuf:"bytes,3,rep,name=peer_urls,json=peerUrls" json:"peer_urls,omitempty"` - ClientUrls []string `protobuf:"bytes,4,rep,name=client_urls,json=clientUrls" json:"client_urls,omitempty"` - LeaderPriority int32 `protobuf:"varint,5,opt,name=leader_priority,json=leaderPriority,proto3" json:"leader_priority,omitempty"` + MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"` + PeerUrls []string `protobuf:"bytes,3,rep,name=peer_urls,json=peerUrls" json:"peer_urls,omitempty"` + ClientUrls []string `protobuf:"bytes,4,rep,name=client_urls,json=clientUrls" json:"client_urls,omitempty"` + LeaderPriority int32 `protobuf:"varint,5,opt,name=leader_priority,json=leaderPriority,proto3" json:"leader_priority,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Member) Reset() { *m = Member{} } +func (m *Member) String() string { return proto.CompactTextString(m) } +func (*Member) ProtoMessage() {} +func (*Member) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{25} +} +func (m *Member) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Member.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Member) XXX_Merge(src proto.Message) { + xxx_messageInfo_Member.Merge(dst, src) +} +func (m *Member) XXX_Size() int { + return m.Size() +} +func (m *Member) XXX_DiscardUnknown() { + xxx_messageInfo_Member.DiscardUnknown(m) } -func (m *Member) Reset() { *m = Member{} } -func (m *Member) String() string { return proto.CompactTextString(m) } -func (*Member) ProtoMessage() {} -func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{25} } +var xxx_messageInfo_Member proto.InternalMessageInfo func (m *Member) GetName() string { if m != nil { @@ -760,13 +1509,44 @@ func (m *Member) GetLeaderPriority() int32 { } type GetMembersRequest struct { - Header *RequestHeader 
`protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetMembersRequest) Reset() { *m = GetMembersRequest{} } +func (m *GetMembersRequest) String() string { return proto.CompactTextString(m) } +func (*GetMembersRequest) ProtoMessage() {} +func (*GetMembersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{26} +} +func (m *GetMembersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetMembersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetMembersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetMembersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetMembersRequest.Merge(dst, src) +} +func (m *GetMembersRequest) XXX_Size() int { + return m.Size() +} +func (m *GetMembersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetMembersRequest.DiscardUnknown(m) } -func (m *GetMembersRequest) Reset() { *m = GetMembersRequest{} } -func (m *GetMembersRequest) String() string { return proto.CompactTextString(m) } -func (*GetMembersRequest) ProtoMessage() {} -func (*GetMembersRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{26} } +var xxx_messageInfo_GetMembersRequest proto.InternalMessageInfo func (m *GetMembersRequest) GetHeader() *RequestHeader { if m != nil { @@ -776,16 +1556,47 @@ func (m *GetMembersRequest) GetHeader() *RequestHeader { } type GetMembersResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` - Leader *Member `protobuf:"bytes,3,opt,name=leader" json:"leader,omitempty"` - EtcdLeader *Member `protobuf:"bytes,4,opt,name=etcd_leader,json=etcdLeader" json:"etcd_leader,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` + Leader *Member `protobuf:"bytes,3,opt,name=leader" json:"leader,omitempty"` + EtcdLeader *Member `protobuf:"bytes,4,opt,name=etcd_leader,json=etcdLeader" json:"etcd_leader,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetMembersResponse) Reset() { *m = GetMembersResponse{} } +func (m *GetMembersResponse) String() string { return proto.CompactTextString(m) } +func (*GetMembersResponse) ProtoMessage() {} +func (*GetMembersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{27} +} +func (m *GetMembersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetMembersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetMembersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetMembersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetMembersResponse.Merge(dst, src) +} +func (m *GetMembersResponse) XXX_Size() int { + return m.Size() +} +func (m 
*GetMembersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetMembersResponse.DiscardUnknown(m) } -func (m *GetMembersResponse) Reset() { *m = GetMembersResponse{} } -func (m *GetMembersResponse) String() string { return proto.CompactTextString(m) } -func (*GetMembersResponse) ProtoMessage() {} -func (*GetMembersResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{27} } +var xxx_messageInfo_GetMembersResponse proto.InternalMessageInfo func (m *GetMembersResponse) GetHeader() *ResponseHeader { if m != nil { @@ -816,14 +1627,45 @@ func (m *GetMembersResponse) GetEtcdLeader() *Member { } type PeerStats struct { - Peer *metapb.Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"` - DownSeconds uint64 `protobuf:"varint,2,opt,name=down_seconds,json=downSeconds,proto3" json:"down_seconds,omitempty"` + Peer *metapb.Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"` + DownSeconds uint64 `protobuf:"varint,2,opt,name=down_seconds,json=downSeconds,proto3" json:"down_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PeerStats) Reset() { *m = PeerStats{} } +func (m *PeerStats) String() string { return proto.CompactTextString(m) } +func (*PeerStats) ProtoMessage() {} +func (*PeerStats) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{28} +} +func (m *PeerStats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PeerStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PeerStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PeerStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_PeerStats.Merge(dst, src) +} +func (m *PeerStats) XXX_Size() int { + return m.Size() +} +func (m *PeerStats) XXX_DiscardUnknown() { + xxx_messageInfo_PeerStats.DiscardUnknown(m) } -func (m *PeerStats) Reset() { *m = PeerStats{} } -func (m *PeerStats) String() string { return proto.CompactTextString(m) } -func (*PeerStats) ProtoMessage() {} -func (*PeerStats) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{28} } +var xxx_messageInfo_PeerStats proto.InternalMessageInfo func (m *PeerStats) GetPeer() *metapb.Peer { if m != nil { @@ -860,13 +1702,44 @@ type RegionHeartbeatRequest struct { // Actually reported time interval Interval *TimeInterval `protobuf:"bytes,12,opt,name=interval" json:"interval,omitempty"` // Approximate number of keys. 
- ApproximateKeys uint64 `protobuf:"varint,13,opt,name=approximate_keys,json=approximateKeys,proto3" json:"approximate_keys,omitempty"` + ApproximateKeys uint64 `protobuf:"varint,13,opt,name=approximate_keys,json=approximateKeys,proto3" json:"approximate_keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegionHeartbeatRequest) Reset() { *m = RegionHeartbeatRequest{} } +func (m *RegionHeartbeatRequest) String() string { return proto.CompactTextString(m) } +func (*RegionHeartbeatRequest) ProtoMessage() {} +func (*RegionHeartbeatRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{29} +} +func (m *RegionHeartbeatRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RegionHeartbeatRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RegionHeartbeatRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RegionHeartbeatRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegionHeartbeatRequest.Merge(dst, src) +} +func (m *RegionHeartbeatRequest) XXX_Size() int { + return m.Size() +} +func (m *RegionHeartbeatRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RegionHeartbeatRequest.DiscardUnknown(m) } -func (m *RegionHeartbeatRequest) Reset() { *m = RegionHeartbeatRequest{} } -func (m *RegionHeartbeatRequest) String() string { return proto.CompactTextString(m) } -func (*RegionHeartbeatRequest) ProtoMessage() {} -func (*RegionHeartbeatRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{29} } +var xxx_messageInfo_RegionHeartbeatRequest proto.InternalMessageInfo func (m *RegionHeartbeatRequest) GetHeader() *RequestHeader { if m != nil { @@ -953,14 +1826,45 @@ func (m *RegionHeartbeatRequest) GetApproximateKeys() uint64 { } type ChangePeer struct { - Peer *metapb.Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"` - ChangeType eraftpb.ConfChangeType `protobuf:"varint,2,opt,name=change_type,json=changeType,proto3,enum=eraftpb.ConfChangeType" json:"change_type,omitempty"` + Peer *metapb.Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"` + ChangeType eraftpb.ConfChangeType `protobuf:"varint,2,opt,name=change_type,json=changeType,proto3,enum=eraftpb.ConfChangeType" json:"change_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChangePeer) Reset() { *m = ChangePeer{} } +func (m *ChangePeer) String() string { return proto.CompactTextString(m) } +func (*ChangePeer) ProtoMessage() {} +func (*ChangePeer) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{30} +} +func (m *ChangePeer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChangePeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChangePeer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ChangePeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChangePeer.Merge(dst, src) +} +func (m *ChangePeer) XXX_Size() int { + return m.Size() +} +func (m *ChangePeer) XXX_DiscardUnknown() { + xxx_messageInfo_ChangePeer.DiscardUnknown(m) } -func (m *ChangePeer) Reset() { 
*m = ChangePeer{} } -func (m *ChangePeer) String() string { return proto.CompactTextString(m) } -func (*ChangePeer) ProtoMessage() {} -func (*ChangePeer) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{30} } +var xxx_messageInfo_ChangePeer proto.InternalMessageInfo func (m *ChangePeer) GetPeer() *metapb.Peer { if m != nil { @@ -977,13 +1881,44 @@ func (m *ChangePeer) GetChangeType() eraftpb.ConfChangeType { } type TransferLeader struct { - Peer *metapb.Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"` + Peer *metapb.Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TransferLeader) Reset() { *m = TransferLeader{} } +func (m *TransferLeader) String() string { return proto.CompactTextString(m) } +func (*TransferLeader) ProtoMessage() {} +func (*TransferLeader) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{31} +} +func (m *TransferLeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TransferLeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TransferLeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TransferLeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransferLeader.Merge(dst, src) +} +func (m *TransferLeader) XXX_Size() int { + return m.Size() +} +func (m *TransferLeader) XXX_DiscardUnknown() { + xxx_messageInfo_TransferLeader.DiscardUnknown(m) } -func (m *TransferLeader) Reset() { *m = TransferLeader{} } -func (m *TransferLeader) String() string { return proto.CompactTextString(m) } -func (*TransferLeader) ProtoMessage() {} -func (*TransferLeader) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{31} } +var xxx_messageInfo_TransferLeader proto.InternalMessageInfo func (m *TransferLeader) GetPeer() *metapb.Peer { if m != nil { @@ -993,13 +1928,44 @@ func (m *TransferLeader) GetPeer() *metapb.Peer { } type Merge struct { - Target *metapb.Region `protobuf:"bytes,1,opt,name=target" json:"target,omitempty"` + Target *metapb.Region `protobuf:"bytes,1,opt,name=target" json:"target,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Merge) Reset() { *m = Merge{} } +func (m *Merge) String() string { return proto.CompactTextString(m) } +func (*Merge) ProtoMessage() {} +func (*Merge) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{32} +} +func (m *Merge) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Merge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Merge.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Merge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Merge.Merge(dst, src) +} +func (m *Merge) XXX_Size() int { + return m.Size() +} +func (m *Merge) XXX_DiscardUnknown() { + xxx_messageInfo_Merge.DiscardUnknown(m) } -func (m *Merge) Reset() { *m = Merge{} } -func (m *Merge) String() string { return proto.CompactTextString(m) } -func (*Merge) ProtoMessage() {} -func (*Merge) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{32} } +var 
xxx_messageInfo_Merge proto.InternalMessageInfo func (m *Merge) GetTarget() *metapb.Region { if m != nil { @@ -1009,13 +1975,44 @@ func (m *Merge) GetTarget() *metapb.Region { } type SplitRegion struct { - Policy CheckPolicy `protobuf:"varint,1,opt,name=policy,proto3,enum=pdpb.CheckPolicy" json:"policy,omitempty"` + Policy CheckPolicy `protobuf:"varint,1,opt,name=policy,proto3,enum=pdpb.CheckPolicy" json:"policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SplitRegion) Reset() { *m = SplitRegion{} } +func (m *SplitRegion) String() string { return proto.CompactTextString(m) } +func (*SplitRegion) ProtoMessage() {} +func (*SplitRegion) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{33} +} +func (m *SplitRegion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SplitRegion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SplitRegion.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SplitRegion) XXX_Merge(src proto.Message) { + xxx_messageInfo_SplitRegion.Merge(dst, src) +} +func (m *SplitRegion) XXX_Size() int { + return m.Size() +} +func (m *SplitRegion) XXX_DiscardUnknown() { + xxx_messageInfo_SplitRegion.DiscardUnknown(m) } -func (m *SplitRegion) Reset() { *m = SplitRegion{} } -func (m *SplitRegion) String() string { return proto.CompactTextString(m) } -func (*SplitRegion) ProtoMessage() {} -func (*SplitRegion) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{33} } +var xxx_messageInfo_SplitRegion proto.InternalMessageInfo func (m *SplitRegion) GetPolicy() CheckPolicy { if m != nil { @@ -1050,13 +2047,44 @@ type RegionHeartbeatResponse struct { TargetPeer *metapb.Peer `protobuf:"bytes,6,opt,name=target_peer,json=targetPeer" json:"target_peer,omitempty"` Merge *Merge `protobuf:"bytes,7,opt,name=merge" json:"merge,omitempty"` // PD sends split_region to let TiKV split a region into two regions. 
- SplitRegion *SplitRegion `protobuf:"bytes,8,opt,name=split_region,json=splitRegion" json:"split_region,omitempty"` + SplitRegion *SplitRegion `protobuf:"bytes,8,opt,name=split_region,json=splitRegion" json:"split_region,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegionHeartbeatResponse) Reset() { *m = RegionHeartbeatResponse{} } +func (m *RegionHeartbeatResponse) String() string { return proto.CompactTextString(m) } +func (*RegionHeartbeatResponse) ProtoMessage() {} +func (*RegionHeartbeatResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{34} +} +func (m *RegionHeartbeatResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RegionHeartbeatResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RegionHeartbeatResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RegionHeartbeatResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegionHeartbeatResponse.Merge(dst, src) +} +func (m *RegionHeartbeatResponse) XXX_Size() int { + return m.Size() +} +func (m *RegionHeartbeatResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RegionHeartbeatResponse.DiscardUnknown(m) } -func (m *RegionHeartbeatResponse) Reset() { *m = RegionHeartbeatResponse{} } -func (m *RegionHeartbeatResponse) String() string { return proto.CompactTextString(m) } -func (*RegionHeartbeatResponse) ProtoMessage() {} -func (*RegionHeartbeatResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{34} } +var xxx_messageInfo_RegionHeartbeatResponse proto.InternalMessageInfo func (m *RegionHeartbeatResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1115,14 +2143,45 @@ func (m *RegionHeartbeatResponse) GetSplitRegion() *SplitRegion { } type AskSplitRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Region *metapb.Region `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Region *metapb.Region `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AskSplitRequest) Reset() { *m = AskSplitRequest{} } +func (m *AskSplitRequest) String() string { return proto.CompactTextString(m) } +func (*AskSplitRequest) ProtoMessage() {} +func (*AskSplitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{35} +} +func (m *AskSplitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AskSplitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AskSplitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *AskSplitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AskSplitRequest.Merge(dst, src) +} +func (m *AskSplitRequest) XXX_Size() int { + return m.Size() +} +func (m *AskSplitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AskSplitRequest.DiscardUnknown(m) } -func (m *AskSplitRequest) Reset() { *m = AskSplitRequest{} } -func (m *AskSplitRequest) String() string { 
return proto.CompactTextString(m) } -func (*AskSplitRequest) ProtoMessage() {} -func (*AskSplitRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{35} } +var xxx_messageInfo_AskSplitRequest proto.InternalMessageInfo func (m *AskSplitRequest) GetHeader() *RequestHeader { if m != nil { @@ -1145,13 +2204,44 @@ type AskSplitResponse struct { // We must guarantee that the new_region_id is global unique. NewRegionId uint64 `protobuf:"varint,2,opt,name=new_region_id,json=newRegionId,proto3" json:"new_region_id,omitempty"` // The peer ids for the new split region. - NewPeerIds []uint64 `protobuf:"varint,3,rep,packed,name=new_peer_ids,json=newPeerIds" json:"new_peer_ids,omitempty"` + NewPeerIds []uint64 `protobuf:"varint,3,rep,packed,name=new_peer_ids,json=newPeerIds" json:"new_peer_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AskSplitResponse) Reset() { *m = AskSplitResponse{} } +func (m *AskSplitResponse) String() string { return proto.CompactTextString(m) } +func (*AskSplitResponse) ProtoMessage() {} +func (*AskSplitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{36} +} +func (m *AskSplitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AskSplitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AskSplitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *AskSplitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AskSplitResponse.Merge(dst, src) +} +func (m *AskSplitResponse) XXX_Size() int { + return m.Size() +} +func (m *AskSplitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AskSplitResponse.DiscardUnknown(m) } -func (m *AskSplitResponse) Reset() { *m = AskSplitResponse{} } -func (m *AskSplitResponse) String() string { return proto.CompactTextString(m) } -func (*AskSplitResponse) ProtoMessage() {} -func (*AskSplitResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{36} } +var xxx_messageInfo_AskSplitResponse proto.InternalMessageInfo func (m *AskSplitResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1175,15 +2265,46 @@ func (m *AskSplitResponse) GetNewPeerIds() []uint64 { } type ReportSplitRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Left *metapb.Region `protobuf:"bytes,2,opt,name=left" json:"left,omitempty"` - Right *metapb.Region `protobuf:"bytes,3,opt,name=right" json:"right,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Left *metapb.Region `protobuf:"bytes,2,opt,name=left" json:"left,omitempty"` + Right *metapb.Region `protobuf:"bytes,3,opt,name=right" json:"right,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReportSplitRequest) Reset() { *m = ReportSplitRequest{} } +func (m *ReportSplitRequest) String() string { return proto.CompactTextString(m) } +func (*ReportSplitRequest) ProtoMessage() {} +func (*ReportSplitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{37} +} +func (m *ReportSplitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReportSplitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + if deterministic { + return xxx_messageInfo_ReportSplitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ReportSplitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReportSplitRequest.Merge(dst, src) +} +func (m *ReportSplitRequest) XXX_Size() int { + return m.Size() +} +func (m *ReportSplitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReportSplitRequest.DiscardUnknown(m) } -func (m *ReportSplitRequest) Reset() { *m = ReportSplitRequest{} } -func (m *ReportSplitRequest) String() string { return proto.CompactTextString(m) } -func (*ReportSplitRequest) ProtoMessage() {} -func (*ReportSplitRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{37} } +var xxx_messageInfo_ReportSplitRequest proto.InternalMessageInfo func (m *ReportSplitRequest) GetHeader() *RequestHeader { if m != nil { @@ -1207,13 +2328,44 @@ func (m *ReportSplitRequest) GetRight() *metapb.Region { } type ReportSplitResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReportSplitResponse) Reset() { *m = ReportSplitResponse{} } +func (m *ReportSplitResponse) String() string { return proto.CompactTextString(m) } +func (*ReportSplitResponse) ProtoMessage() {} +func (*ReportSplitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{38} +} +func (m *ReportSplitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReportSplitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReportSplitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ReportSplitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReportSplitResponse.Merge(dst, src) +} +func (m *ReportSplitResponse) XXX_Size() int { + return m.Size() +} +func (m *ReportSplitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReportSplitResponse.DiscardUnknown(m) } -func (m *ReportSplitResponse) Reset() { *m = ReportSplitResponse{} } -func (m *ReportSplitResponse) String() string { return proto.CompactTextString(m) } -func (*ReportSplitResponse) ProtoMessage() {} -func (*ReportSplitResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{38} } +var xxx_messageInfo_ReportSplitResponse proto.InternalMessageInfo func (m *ReportSplitResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1223,15 +2375,46 @@ func (m *ReportSplitResponse) GetHeader() *ResponseHeader { } type AskBatchSplitRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Region *metapb.Region `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"` - SplitCount uint32 `protobuf:"varint,3,opt,name=split_count,json=splitCount,proto3" json:"split_count,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Region *metapb.Region `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"` + SplitCount uint32 `protobuf:"varint,3,opt,name=split_count,json=splitCount,proto3" json:"split_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} 
`json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AskBatchSplitRequest) Reset() { *m = AskBatchSplitRequest{} } +func (m *AskBatchSplitRequest) String() string { return proto.CompactTextString(m) } +func (*AskBatchSplitRequest) ProtoMessage() {} +func (*AskBatchSplitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{39} +} +func (m *AskBatchSplitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AskBatchSplitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AskBatchSplitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *AskBatchSplitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AskBatchSplitRequest.Merge(dst, src) +} +func (m *AskBatchSplitRequest) XXX_Size() int { + return m.Size() +} +func (m *AskBatchSplitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AskBatchSplitRequest.DiscardUnknown(m) } -func (m *AskBatchSplitRequest) Reset() { *m = AskBatchSplitRequest{} } -func (m *AskBatchSplitRequest) String() string { return proto.CompactTextString(m) } -func (*AskBatchSplitRequest) ProtoMessage() {} -func (*AskBatchSplitRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{39} } +var xxx_messageInfo_AskBatchSplitRequest proto.InternalMessageInfo func (m *AskBatchSplitRequest) GetHeader() *RequestHeader { if m != nil { @@ -1255,14 +2438,45 @@ func (m *AskBatchSplitRequest) GetSplitCount() uint32 { } type SplitID struct { - NewRegionId uint64 `protobuf:"varint,1,opt,name=new_region_id,json=newRegionId,proto3" json:"new_region_id,omitempty"` - NewPeerIds []uint64 `protobuf:"varint,2,rep,packed,name=new_peer_ids,json=newPeerIds" json:"new_peer_ids,omitempty"` + NewRegionId uint64 `protobuf:"varint,1,opt,name=new_region_id,json=newRegionId,proto3" json:"new_region_id,omitempty"` + NewPeerIds []uint64 `protobuf:"varint,2,rep,packed,name=new_peer_ids,json=newPeerIds" json:"new_peer_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SplitID) Reset() { *m = SplitID{} } +func (m *SplitID) String() string { return proto.CompactTextString(m) } +func (*SplitID) ProtoMessage() {} +func (*SplitID) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{40} +} +func (m *SplitID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SplitID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SplitID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SplitID) XXX_Merge(src proto.Message) { + xxx_messageInfo_SplitID.Merge(dst, src) +} +func (m *SplitID) XXX_Size() int { + return m.Size() +} +func (m *SplitID) XXX_DiscardUnknown() { + xxx_messageInfo_SplitID.DiscardUnknown(m) } -func (m *SplitID) Reset() { *m = SplitID{} } -func (m *SplitID) String() string { return proto.CompactTextString(m) } -func (*SplitID) ProtoMessage() {} -func (*SplitID) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{40} } +var xxx_messageInfo_SplitID proto.InternalMessageInfo func (m *SplitID) GetNewRegionId() uint64 { if m != nil { @@ -1279,14 +2493,45 @@ func (m *SplitID) GetNewPeerIds() 
[]uint64 { } type AskBatchSplitResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Ids []*SplitID `protobuf:"bytes,2,rep,name=ids" json:"ids,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Ids []*SplitID `protobuf:"bytes,2,rep,name=ids" json:"ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AskBatchSplitResponse) Reset() { *m = AskBatchSplitResponse{} } +func (m *AskBatchSplitResponse) String() string { return proto.CompactTextString(m) } +func (*AskBatchSplitResponse) ProtoMessage() {} +func (*AskBatchSplitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{41} +} +func (m *AskBatchSplitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AskBatchSplitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AskBatchSplitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *AskBatchSplitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AskBatchSplitResponse.Merge(dst, src) +} +func (m *AskBatchSplitResponse) XXX_Size() int { + return m.Size() +} +func (m *AskBatchSplitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AskBatchSplitResponse.DiscardUnknown(m) } -func (m *AskBatchSplitResponse) Reset() { *m = AskBatchSplitResponse{} } -func (m *AskBatchSplitResponse) String() string { return proto.CompactTextString(m) } -func (*AskBatchSplitResponse) ProtoMessage() {} -func (*AskBatchSplitResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{41} } +var xxx_messageInfo_AskBatchSplitResponse proto.InternalMessageInfo func (m *AskBatchSplitResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1303,14 +2548,45 @@ func (m *AskBatchSplitResponse) GetIds() []*SplitID { } type ReportBatchSplitRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Regions []*metapb.Region `protobuf:"bytes,2,rep,name=regions" json:"regions,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Regions []*metapb.Region `protobuf:"bytes,2,rep,name=regions" json:"regions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReportBatchSplitRequest) Reset() { *m = ReportBatchSplitRequest{} } +func (m *ReportBatchSplitRequest) String() string { return proto.CompactTextString(m) } +func (*ReportBatchSplitRequest) ProtoMessage() {} +func (*ReportBatchSplitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{42} +} +func (m *ReportBatchSplitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReportBatchSplitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReportBatchSplitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ReportBatchSplitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReportBatchSplitRequest.Merge(dst, src) +} +func (m *ReportBatchSplitRequest) XXX_Size() int { + return m.Size() +} +func (m 
*ReportBatchSplitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReportBatchSplitRequest.DiscardUnknown(m) } -func (m *ReportBatchSplitRequest) Reset() { *m = ReportBatchSplitRequest{} } -func (m *ReportBatchSplitRequest) String() string { return proto.CompactTextString(m) } -func (*ReportBatchSplitRequest) ProtoMessage() {} -func (*ReportBatchSplitRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{42} } +var xxx_messageInfo_ReportBatchSplitRequest proto.InternalMessageInfo func (m *ReportBatchSplitRequest) GetHeader() *RequestHeader { if m != nil { @@ -1327,13 +2603,44 @@ func (m *ReportBatchSplitRequest) GetRegions() []*metapb.Region { } type ReportBatchSplitResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReportBatchSplitResponse) Reset() { *m = ReportBatchSplitResponse{} } +func (m *ReportBatchSplitResponse) String() string { return proto.CompactTextString(m) } +func (*ReportBatchSplitResponse) ProtoMessage() {} +func (*ReportBatchSplitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{43} +} +func (m *ReportBatchSplitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReportBatchSplitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReportBatchSplitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ReportBatchSplitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReportBatchSplitResponse.Merge(dst, src) +} +func (m *ReportBatchSplitResponse) XXX_Size() int { + return m.Size() +} +func (m *ReportBatchSplitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReportBatchSplitResponse.DiscardUnknown(m) } -func (m *ReportBatchSplitResponse) Reset() { *m = ReportBatchSplitResponse{} } -func (m *ReportBatchSplitResponse) String() string { return proto.CompactTextString(m) } -func (*ReportBatchSplitResponse) ProtoMessage() {} -func (*ReportBatchSplitResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{43} } +var xxx_messageInfo_ReportBatchSplitResponse proto.InternalMessageInfo func (m *ReportBatchSplitResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1346,13 +2653,44 @@ type TimeInterval struct { // The unix timestamp in seconds of the start of this period. StartTimestamp uint64 `protobuf:"varint,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` // The unix timestamp in seconds of the end of this period. 
- EndTimestamp uint64 `protobuf:"varint,2,opt,name=end_timestamp,json=endTimestamp,proto3" json:"end_timestamp,omitempty"` + EndTimestamp uint64 `protobuf:"varint,2,opt,name=end_timestamp,json=endTimestamp,proto3" json:"end_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeInterval) Reset() { *m = TimeInterval{} } +func (m *TimeInterval) String() string { return proto.CompactTextString(m) } +func (*TimeInterval) ProtoMessage() {} +func (*TimeInterval) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{44} +} +func (m *TimeInterval) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeInterval) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeInterval.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TimeInterval) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeInterval.Merge(dst, src) +} +func (m *TimeInterval) XXX_Size() int { + return m.Size() +} +func (m *TimeInterval) XXX_DiscardUnknown() { + xxx_messageInfo_TimeInterval.DiscardUnknown(m) } -func (m *TimeInterval) Reset() { *m = TimeInterval{} } -func (m *TimeInterval) String() string { return proto.CompactTextString(m) } -func (*TimeInterval) ProtoMessage() {} -func (*TimeInterval) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{44} } +var xxx_messageInfo_TimeInterval proto.InternalMessageInfo func (m *TimeInterval) GetStartTimestamp() uint64 { if m != nil { @@ -1397,13 +2735,44 @@ type StoreStats struct { // Keys read for the store during this period. 
KeysRead uint64 `protobuf:"varint,14,opt,name=keys_read,json=keysRead,proto3" json:"keys_read,omitempty"` // Actually reported time interval - Interval *TimeInterval `protobuf:"bytes,15,opt,name=interval" json:"interval,omitempty"` + Interval *TimeInterval `protobuf:"bytes,15,opt,name=interval" json:"interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StoreStats) Reset() { *m = StoreStats{} } +func (m *StoreStats) String() string { return proto.CompactTextString(m) } +func (*StoreStats) ProtoMessage() {} +func (*StoreStats) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{45} +} +func (m *StoreStats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *StoreStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreStats.Merge(dst, src) +} +func (m *StoreStats) XXX_Size() int { + return m.Size() +} +func (m *StoreStats) XXX_DiscardUnknown() { + xxx_messageInfo_StoreStats.DiscardUnknown(m) } -func (m *StoreStats) Reset() { *m = StoreStats{} } -func (m *StoreStats) String() string { return proto.CompactTextString(m) } -func (*StoreStats) ProtoMessage() {} -func (*StoreStats) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{45} } +var xxx_messageInfo_StoreStats proto.InternalMessageInfo func (m *StoreStats) GetStoreId() uint64 { if m != nil { @@ -1511,14 +2880,45 @@ func (m *StoreStats) GetInterval() *TimeInterval { } type StoreHeartbeatRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Stats *StoreStats `protobuf:"bytes,2,opt,name=stats" json:"stats,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Stats *StoreStats `protobuf:"bytes,2,opt,name=stats" json:"stats,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StoreHeartbeatRequest) Reset() { *m = StoreHeartbeatRequest{} } +func (m *StoreHeartbeatRequest) String() string { return proto.CompactTextString(m) } +func (*StoreHeartbeatRequest) ProtoMessage() {} +func (*StoreHeartbeatRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{46} +} +func (m *StoreHeartbeatRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreHeartbeatRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreHeartbeatRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *StoreHeartbeatRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreHeartbeatRequest.Merge(dst, src) +} +func (m *StoreHeartbeatRequest) XXX_Size() int { + return m.Size() +} +func (m *StoreHeartbeatRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StoreHeartbeatRequest.DiscardUnknown(m) } -func (m *StoreHeartbeatRequest) Reset() { *m = StoreHeartbeatRequest{} } -func (m *StoreHeartbeatRequest) String() string { return proto.CompactTextString(m) } -func (*StoreHeartbeatRequest) ProtoMessage() {} -func 
(*StoreHeartbeatRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{46} } +var xxx_messageInfo_StoreHeartbeatRequest proto.InternalMessageInfo func (m *StoreHeartbeatRequest) GetHeader() *RequestHeader { if m != nil { @@ -1535,13 +2935,44 @@ func (m *StoreHeartbeatRequest) GetStats() *StoreStats { } type StoreHeartbeatResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StoreHeartbeatResponse) Reset() { *m = StoreHeartbeatResponse{} } +func (m *StoreHeartbeatResponse) String() string { return proto.CompactTextString(m) } +func (*StoreHeartbeatResponse) ProtoMessage() {} +func (*StoreHeartbeatResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{47} +} +func (m *StoreHeartbeatResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreHeartbeatResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreHeartbeatResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *StoreHeartbeatResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreHeartbeatResponse.Merge(dst, src) +} +func (m *StoreHeartbeatResponse) XXX_Size() int { + return m.Size() +} +func (m *StoreHeartbeatResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StoreHeartbeatResponse.DiscardUnknown(m) } -func (m *StoreHeartbeatResponse) Reset() { *m = StoreHeartbeatResponse{} } -func (m *StoreHeartbeatResponse) String() string { return proto.CompactTextString(m) } -func (*StoreHeartbeatResponse) ProtoMessage() {} -func (*StoreHeartbeatResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{47} } +var xxx_messageInfo_StoreHeartbeatResponse proto.InternalMessageInfo func (m *StoreHeartbeatResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1555,14 +2986,45 @@ type ScatterRegionRequest struct { RegionId uint64 `protobuf:"varint,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` // PD will use these region information if it can't find the region. // For example, the region is just split and hasn't report to PD yet. 
- Region *metapb.Region `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` - Leader *metapb.Peer `protobuf:"bytes,4,opt,name=leader" json:"leader,omitempty"` + Region *metapb.Region `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + Leader *metapb.Peer `protobuf:"bytes,4,opt,name=leader" json:"leader,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ScatterRegionRequest) Reset() { *m = ScatterRegionRequest{} } +func (m *ScatterRegionRequest) String() string { return proto.CompactTextString(m) } +func (*ScatterRegionRequest) ProtoMessage() {} +func (*ScatterRegionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{48} +} +func (m *ScatterRegionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScatterRegionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScatterRegionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ScatterRegionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScatterRegionRequest.Merge(dst, src) +} +func (m *ScatterRegionRequest) XXX_Size() int { + return m.Size() +} +func (m *ScatterRegionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ScatterRegionRequest.DiscardUnknown(m) } -func (m *ScatterRegionRequest) Reset() { *m = ScatterRegionRequest{} } -func (m *ScatterRegionRequest) String() string { return proto.CompactTextString(m) } -func (*ScatterRegionRequest) ProtoMessage() {} -func (*ScatterRegionRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{48} } +var xxx_messageInfo_ScatterRegionRequest proto.InternalMessageInfo func (m *ScatterRegionRequest) GetHeader() *RequestHeader { if m != nil { @@ -1593,13 +3055,44 @@ func (m *ScatterRegionRequest) GetLeader() *metapb.Peer { } type ScatterRegionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ScatterRegionResponse) Reset() { *m = ScatterRegionResponse{} } +func (m *ScatterRegionResponse) String() string { return proto.CompactTextString(m) } +func (*ScatterRegionResponse) ProtoMessage() {} +func (*ScatterRegionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{49} +} +func (m *ScatterRegionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScatterRegionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScatterRegionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ScatterRegionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScatterRegionResponse.Merge(dst, src) +} +func (m *ScatterRegionResponse) XXX_Size() int { + return m.Size() +} +func (m *ScatterRegionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ScatterRegionResponse.DiscardUnknown(m) } -func (m *ScatterRegionResponse) Reset() { *m = ScatterRegionResponse{} } -func (m *ScatterRegionResponse) String() string { return proto.CompactTextString(m) 
} -func (*ScatterRegionResponse) ProtoMessage() {} -func (*ScatterRegionResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{49} } +var xxx_messageInfo_ScatterRegionResponse proto.InternalMessageInfo func (m *ScatterRegionResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1609,13 +3102,44 @@ func (m *ScatterRegionResponse) GetHeader() *ResponseHeader { } type GetGCSafePointRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetGCSafePointRequest) Reset() { *m = GetGCSafePointRequest{} } +func (m *GetGCSafePointRequest) String() string { return proto.CompactTextString(m) } +func (*GetGCSafePointRequest) ProtoMessage() {} +func (*GetGCSafePointRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{50} +} +func (m *GetGCSafePointRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetGCSafePointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetGCSafePointRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetGCSafePointRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetGCSafePointRequest.Merge(dst, src) +} +func (m *GetGCSafePointRequest) XXX_Size() int { + return m.Size() +} +func (m *GetGCSafePointRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetGCSafePointRequest.DiscardUnknown(m) } -func (m *GetGCSafePointRequest) Reset() { *m = GetGCSafePointRequest{} } -func (m *GetGCSafePointRequest) String() string { return proto.CompactTextString(m) } -func (*GetGCSafePointRequest) ProtoMessage() {} -func (*GetGCSafePointRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{50} } +var xxx_messageInfo_GetGCSafePointRequest proto.InternalMessageInfo func (m *GetGCSafePointRequest) GetHeader() *RequestHeader { if m != nil { @@ -1625,14 +3149,45 @@ func (m *GetGCSafePointRequest) GetHeader() *RequestHeader { } type GetGCSafePointResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - SafePoint uint64 `protobuf:"varint,2,opt,name=safe_point,json=safePoint,proto3" json:"safe_point,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + SafePoint uint64 `protobuf:"varint,2,opt,name=safe_point,json=safePoint,proto3" json:"safe_point,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetGCSafePointResponse) Reset() { *m = GetGCSafePointResponse{} } +func (m *GetGCSafePointResponse) String() string { return proto.CompactTextString(m) } +func (*GetGCSafePointResponse) ProtoMessage() {} +func (*GetGCSafePointResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{51} +} +func (m *GetGCSafePointResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetGCSafePointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetGCSafePointResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return 
nil, err + } + return b[:n], nil + } +} +func (dst *GetGCSafePointResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetGCSafePointResponse.Merge(dst, src) +} +func (m *GetGCSafePointResponse) XXX_Size() int { + return m.Size() +} +func (m *GetGCSafePointResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetGCSafePointResponse.DiscardUnknown(m) } -func (m *GetGCSafePointResponse) Reset() { *m = GetGCSafePointResponse{} } -func (m *GetGCSafePointResponse) String() string { return proto.CompactTextString(m) } -func (*GetGCSafePointResponse) ProtoMessage() {} -func (*GetGCSafePointResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{51} } +var xxx_messageInfo_GetGCSafePointResponse proto.InternalMessageInfo func (m *GetGCSafePointResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1649,14 +3204,45 @@ func (m *GetGCSafePointResponse) GetSafePoint() uint64 { } type UpdateGCSafePointRequest struct { - Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - SafePoint uint64 `protobuf:"varint,2,opt,name=safe_point,json=safePoint,proto3" json:"safe_point,omitempty"` + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + SafePoint uint64 `protobuf:"varint,2,opt,name=safe_point,json=safePoint,proto3" json:"safe_point,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateGCSafePointRequest) Reset() { *m = UpdateGCSafePointRequest{} } +func (m *UpdateGCSafePointRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateGCSafePointRequest) ProtoMessage() {} +func (*UpdateGCSafePointRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{52} +} +func (m *UpdateGCSafePointRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateGCSafePointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateGCSafePointRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *UpdateGCSafePointRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateGCSafePointRequest.Merge(dst, src) +} +func (m *UpdateGCSafePointRequest) XXX_Size() int { + return m.Size() +} +func (m *UpdateGCSafePointRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateGCSafePointRequest.DiscardUnknown(m) } -func (m *UpdateGCSafePointRequest) Reset() { *m = UpdateGCSafePointRequest{} } -func (m *UpdateGCSafePointRequest) String() string { return proto.CompactTextString(m) } -func (*UpdateGCSafePointRequest) ProtoMessage() {} -func (*UpdateGCSafePointRequest) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{52} } +var xxx_messageInfo_UpdateGCSafePointRequest proto.InternalMessageInfo func (m *UpdateGCSafePointRequest) GetHeader() *RequestHeader { if m != nil { @@ -1673,14 +3259,45 @@ func (m *UpdateGCSafePointRequest) GetSafePoint() uint64 { } type UpdateGCSafePointResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - NewSafePoint uint64 `protobuf:"varint,2,opt,name=new_safe_point,json=newSafePoint,proto3" json:"new_safe_point,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + NewSafePoint uint64 `protobuf:"varint,2,opt,name=new_safe_point,json=newSafePoint,proto3" 
json:"new_safe_point,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateGCSafePointResponse) Reset() { *m = UpdateGCSafePointResponse{} } +func (m *UpdateGCSafePointResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateGCSafePointResponse) ProtoMessage() {} +func (*UpdateGCSafePointResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{53} +} +func (m *UpdateGCSafePointResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateGCSafePointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateGCSafePointResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *UpdateGCSafePointResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateGCSafePointResponse.Merge(dst, src) +} +func (m *UpdateGCSafePointResponse) XXX_Size() int { + return m.Size() +} +func (m *UpdateGCSafePointResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateGCSafePointResponse.DiscardUnknown(m) } -func (m *UpdateGCSafePointResponse) Reset() { *m = UpdateGCSafePointResponse{} } -func (m *UpdateGCSafePointResponse) String() string { return proto.CompactTextString(m) } -func (*UpdateGCSafePointResponse) ProtoMessage() {} -func (*UpdateGCSafePointResponse) Descriptor() ([]byte, []int) { return fileDescriptorPdpb, []int{53} } +var xxx_messageInfo_UpdateGCSafePointResponse proto.InternalMessageInfo func (m *UpdateGCSafePointResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1696,6 +3313,116 @@ func (m *UpdateGCSafePointResponse) GetNewSafePoint() uint64 { return 0 } +type SyncRegionRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SyncRegionRequest) Reset() { *m = SyncRegionRequest{} } +func (m *SyncRegionRequest) String() string { return proto.CompactTextString(m) } +func (*SyncRegionRequest) ProtoMessage() {} +func (*SyncRegionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{54} +} +func (m *SyncRegionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SyncRegionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SyncRegionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SyncRegionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SyncRegionRequest.Merge(dst, src) +} +func (m *SyncRegionRequest) XXX_Size() int { + return m.Size() +} +func (m *SyncRegionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SyncRegionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SyncRegionRequest proto.InternalMessageInfo + +func (m *SyncRegionRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SyncRegionRequest) GetMember() *Member { + if m != nil { + return m.Member + } + return nil +} + +type SyncRegionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` 
+ Regions []*metapb.Region `protobuf:"bytes,2,rep,name=regions" json:"regions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SyncRegionResponse) Reset() { *m = SyncRegionResponse{} } +func (m *SyncRegionResponse) String() string { return proto.CompactTextString(m) } +func (*SyncRegionResponse) ProtoMessage() {} +func (*SyncRegionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_pdpb_3ac9e95c85c25438, []int{55} +} +func (m *SyncRegionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SyncRegionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SyncRegionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SyncRegionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SyncRegionResponse.Merge(dst, src) +} +func (m *SyncRegionResponse) XXX_Size() int { + return m.Size() +} +func (m *SyncRegionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SyncRegionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SyncRegionResponse proto.InternalMessageInfo + +func (m *SyncRegionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SyncRegionResponse) GetRegions() []*metapb.Region { + if m != nil { + return m.Regions + } + return nil +} + func init() { proto.RegisterType((*RequestHeader)(nil), "pdpb.RequestHeader") proto.RegisterType((*ResponseHeader)(nil), "pdpb.ResponseHeader") @@ -1751,6 +3478,8 @@ func init() { proto.RegisterType((*GetGCSafePointResponse)(nil), "pdpb.GetGCSafePointResponse") proto.RegisterType((*UpdateGCSafePointRequest)(nil), "pdpb.UpdateGCSafePointRequest") proto.RegisterType((*UpdateGCSafePointResponse)(nil), "pdpb.UpdateGCSafePointResponse") + proto.RegisterType((*SyncRegionRequest)(nil), "pdpb.SyncRegionRequest") + proto.RegisterType((*SyncRegionResponse)(nil), "pdpb.SyncRegionResponse") proto.RegisterEnum("pdpb.ErrorType", ErrorType_name, ErrorType_value) proto.RegisterEnum("pdpb.CheckPolicy", CheckPolicy_name, CheckPolicy_value) } @@ -1763,8 +3492,9 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// Client API for PD service - +// PDClient is the client API for PD service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type PDClient interface { // GetMembers get the member list of this cluster. It does not require // the cluster_id in request matchs the id of this cluster. 
@@ -1790,6 +3520,7 @@ type PDClient interface { ScatterRegion(ctx context.Context, in *ScatterRegionRequest, opts ...grpc.CallOption) (*ScatterRegionResponse, error) GetGCSafePoint(ctx context.Context, in *GetGCSafePointRequest, opts ...grpc.CallOption) (*GetGCSafePointResponse, error) UpdateGCSafePoint(ctx context.Context, in *UpdateGCSafePointRequest, opts ...grpc.CallOption) (*UpdateGCSafePointResponse, error) + SyncRegions(ctx context.Context, opts ...grpc.CallOption) (PD_SyncRegionsClient, error) } type pDClient struct { @@ -1802,7 +3533,7 @@ func NewPDClient(cc *grpc.ClientConn) PDClient { func (c *pDClient) GetMembers(ctx context.Context, in *GetMembersRequest, opts ...grpc.CallOption) (*GetMembersResponse, error) { out := new(GetMembersResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/GetMembers", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/GetMembers", in, out, opts...) if err != nil { return nil, err } @@ -1810,7 +3541,7 @@ func (c *pDClient) GetMembers(ctx context.Context, in *GetMembersRequest, opts . } func (c *pDClient) Tso(ctx context.Context, opts ...grpc.CallOption) (PD_TsoClient, error) { - stream, err := grpc.NewClientStream(ctx, &_PD_serviceDesc.Streams[0], c.cc, "/pdpb.PD/Tso", opts...) + stream, err := c.cc.NewStream(ctx, &_PD_serviceDesc.Streams[0], "/pdpb.PD/Tso", opts...) if err != nil { return nil, err } @@ -1842,7 +3573,7 @@ func (x *pDTsoClient) Recv() (*TsoResponse, error) { func (c *pDClient) Bootstrap(ctx context.Context, in *BootstrapRequest, opts ...grpc.CallOption) (*BootstrapResponse, error) { out := new(BootstrapResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/Bootstrap", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/Bootstrap", in, out, opts...) if err != nil { return nil, err } @@ -1851,7 +3582,7 @@ func (c *pDClient) Bootstrap(ctx context.Context, in *BootstrapRequest, opts ... func (c *pDClient) IsBootstrapped(ctx context.Context, in *IsBootstrappedRequest, opts ...grpc.CallOption) (*IsBootstrappedResponse, error) { out := new(IsBootstrappedResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/IsBootstrapped", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/IsBootstrapped", in, out, opts...) if err != nil { return nil, err } @@ -1860,7 +3591,7 @@ func (c *pDClient) IsBootstrapped(ctx context.Context, in *IsBootstrappedRequest func (c *pDClient) AllocID(ctx context.Context, in *AllocIDRequest, opts ...grpc.CallOption) (*AllocIDResponse, error) { out := new(AllocIDResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/AllocID", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/AllocID", in, out, opts...) if err != nil { return nil, err } @@ -1869,7 +3600,7 @@ func (c *pDClient) AllocID(ctx context.Context, in *AllocIDRequest, opts ...grpc func (c *pDClient) GetStore(ctx context.Context, in *GetStoreRequest, opts ...grpc.CallOption) (*GetStoreResponse, error) { out := new(GetStoreResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/GetStore", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/GetStore", in, out, opts...) if err != nil { return nil, err } @@ -1878,7 +3609,7 @@ func (c *pDClient) GetStore(ctx context.Context, in *GetStoreRequest, opts ...gr func (c *pDClient) PutStore(ctx context.Context, in *PutStoreRequest, opts ...grpc.CallOption) (*PutStoreResponse, error) { out := new(PutStoreResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/PutStore", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/PutStore", in, out, opts...) 
if err != nil { return nil, err } @@ -1887,7 +3618,7 @@ func (c *pDClient) PutStore(ctx context.Context, in *PutStoreRequest, opts ...gr func (c *pDClient) GetAllStores(ctx context.Context, in *GetAllStoresRequest, opts ...grpc.CallOption) (*GetAllStoresResponse, error) { out := new(GetAllStoresResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/GetAllStores", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/GetAllStores", in, out, opts...) if err != nil { return nil, err } @@ -1896,7 +3627,7 @@ func (c *pDClient) GetAllStores(ctx context.Context, in *GetAllStoresRequest, op func (c *pDClient) StoreHeartbeat(ctx context.Context, in *StoreHeartbeatRequest, opts ...grpc.CallOption) (*StoreHeartbeatResponse, error) { out := new(StoreHeartbeatResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/StoreHeartbeat", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/StoreHeartbeat", in, out, opts...) if err != nil { return nil, err } @@ -1904,7 +3635,7 @@ func (c *pDClient) StoreHeartbeat(ctx context.Context, in *StoreHeartbeatRequest } func (c *pDClient) RegionHeartbeat(ctx context.Context, opts ...grpc.CallOption) (PD_RegionHeartbeatClient, error) { - stream, err := grpc.NewClientStream(ctx, &_PD_serviceDesc.Streams[1], c.cc, "/pdpb.PD/RegionHeartbeat", opts...) + stream, err := c.cc.NewStream(ctx, &_PD_serviceDesc.Streams[1], "/pdpb.PD/RegionHeartbeat", opts...) if err != nil { return nil, err } @@ -1936,7 +3667,7 @@ func (x *pDRegionHeartbeatClient) Recv() (*RegionHeartbeatResponse, error) { func (c *pDClient) GetRegion(ctx context.Context, in *GetRegionRequest, opts ...grpc.CallOption) (*GetRegionResponse, error) { out := new(GetRegionResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/GetRegion", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/GetRegion", in, out, opts...) if err != nil { return nil, err } @@ -1945,7 +3676,7 @@ func (c *pDClient) GetRegion(ctx context.Context, in *GetRegionRequest, opts ... func (c *pDClient) GetPrevRegion(ctx context.Context, in *GetRegionRequest, opts ...grpc.CallOption) (*GetRegionResponse, error) { out := new(GetRegionResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/GetPrevRegion", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/GetPrevRegion", in, out, opts...) if err != nil { return nil, err } @@ -1954,25 +3685,27 @@ func (c *pDClient) GetPrevRegion(ctx context.Context, in *GetRegionRequest, opts func (c *pDClient) GetRegionByID(ctx context.Context, in *GetRegionByIDRequest, opts ...grpc.CallOption) (*GetRegionResponse, error) { out := new(GetRegionResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/GetRegionByID", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/GetRegionByID", in, out, opts...) if err != nil { return nil, err } return out, nil } +// Deprecated: Do not use. func (c *pDClient) AskSplit(ctx context.Context, in *AskSplitRequest, opts ...grpc.CallOption) (*AskSplitResponse, error) { out := new(AskSplitResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/AskSplit", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/AskSplit", in, out, opts...) if err != nil { return nil, err } return out, nil } +// Deprecated: Do not use. func (c *pDClient) ReportSplit(ctx context.Context, in *ReportSplitRequest, opts ...grpc.CallOption) (*ReportSplitResponse, error) { out := new(ReportSplitResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/ReportSplit", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/ReportSplit", in, out, opts...) 
if err != nil { return nil, err } @@ -1981,7 +3714,7 @@ func (c *pDClient) ReportSplit(ctx context.Context, in *ReportSplitRequest, opts func (c *pDClient) AskBatchSplit(ctx context.Context, in *AskBatchSplitRequest, opts ...grpc.CallOption) (*AskBatchSplitResponse, error) { out := new(AskBatchSplitResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/AskBatchSplit", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/AskBatchSplit", in, out, opts...) if err != nil { return nil, err } @@ -1990,7 +3723,7 @@ func (c *pDClient) AskBatchSplit(ctx context.Context, in *AskBatchSplitRequest, func (c *pDClient) ReportBatchSplit(ctx context.Context, in *ReportBatchSplitRequest, opts ...grpc.CallOption) (*ReportBatchSplitResponse, error) { out := new(ReportBatchSplitResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/ReportBatchSplit", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/ReportBatchSplit", in, out, opts...) if err != nil { return nil, err } @@ -1999,7 +3732,7 @@ func (c *pDClient) ReportBatchSplit(ctx context.Context, in *ReportBatchSplitReq func (c *pDClient) GetClusterConfig(ctx context.Context, in *GetClusterConfigRequest, opts ...grpc.CallOption) (*GetClusterConfigResponse, error) { out := new(GetClusterConfigResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/GetClusterConfig", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/GetClusterConfig", in, out, opts...) if err != nil { return nil, err } @@ -2008,7 +3741,7 @@ func (c *pDClient) GetClusterConfig(ctx context.Context, in *GetClusterConfigReq func (c *pDClient) PutClusterConfig(ctx context.Context, in *PutClusterConfigRequest, opts ...grpc.CallOption) (*PutClusterConfigResponse, error) { out := new(PutClusterConfigResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/PutClusterConfig", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/PutClusterConfig", in, out, opts...) if err != nil { return nil, err } @@ -2017,7 +3750,7 @@ func (c *pDClient) PutClusterConfig(ctx context.Context, in *PutClusterConfigReq func (c *pDClient) ScatterRegion(ctx context.Context, in *ScatterRegionRequest, opts ...grpc.CallOption) (*ScatterRegionResponse, error) { out := new(ScatterRegionResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/ScatterRegion", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/ScatterRegion", in, out, opts...) if err != nil { return nil, err } @@ -2026,7 +3759,7 @@ func (c *pDClient) ScatterRegion(ctx context.Context, in *ScatterRegionRequest, func (c *pDClient) GetGCSafePoint(ctx context.Context, in *GetGCSafePointRequest, opts ...grpc.CallOption) (*GetGCSafePointResponse, error) { out := new(GetGCSafePointResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/GetGCSafePoint", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/GetGCSafePoint", in, out, opts...) if err != nil { return nil, err } @@ -2035,15 +3768,45 @@ func (c *pDClient) GetGCSafePoint(ctx context.Context, in *GetGCSafePointRequest func (c *pDClient) UpdateGCSafePoint(ctx context.Context, in *UpdateGCSafePointRequest, opts ...grpc.CallOption) (*UpdateGCSafePointResponse, error) { out := new(UpdateGCSafePointResponse) - err := grpc.Invoke(ctx, "/pdpb.PD/UpdateGCSafePoint", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/pdpb.PD/UpdateGCSafePoint", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -// Server API for PD service +func (c *pDClient) SyncRegions(ctx context.Context, opts ...grpc.CallOption) (PD_SyncRegionsClient, error) { + stream, err := c.cc.NewStream(ctx, &_PD_serviceDesc.Streams[2], "/pdpb.PD/SyncRegions", opts...) + if err != nil { + return nil, err + } + x := &pDSyncRegionsClient{stream} + return x, nil +} + +type PD_SyncRegionsClient interface { + Send(*SyncRegionRequest) error + Recv() (*SyncRegionResponse, error) + grpc.ClientStream +} + +type pDSyncRegionsClient struct { + grpc.ClientStream +} + +func (x *pDSyncRegionsClient) Send(m *SyncRegionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *pDSyncRegionsClient) Recv() (*SyncRegionResponse, error) { + m := new(SyncRegionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} +// PDServer is the server API for PD service. type PDServer interface { // GetMembers get the member list of this cluster. It does not require // the cluster_id in request matchs the id of this cluster. @@ -2069,6 +3832,7 @@ type PDServer interface { ScatterRegion(context.Context, *ScatterRegionRequest) (*ScatterRegionResponse, error) GetGCSafePoint(context.Context, *GetGCSafePointRequest) (*GetGCSafePointResponse, error) UpdateGCSafePoint(context.Context, *UpdateGCSafePointRequest) (*UpdateGCSafePointResponse, error) + SyncRegions(PD_SyncRegionsServer) error } func RegisterPDServer(s *grpc.Server, srv PDServer) { @@ -2487,6 +4251,32 @@ func _PD_UpdateGCSafePoint_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } +func _PD_SyncRegions_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(PDServer).SyncRegions(&pDSyncRegionsServer{stream}) +} + +type PD_SyncRegionsServer interface { + Send(*SyncRegionResponse) error + Recv() (*SyncRegionRequest, error) + grpc.ServerStream +} + +type pDSyncRegionsServer struct { + grpc.ServerStream +} + +func (x *pDSyncRegionsServer) Send(m *SyncRegionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *pDSyncRegionsServer) Recv() (*SyncRegionRequest, error) { + m := new(SyncRegionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + var _PD_serviceDesc = grpc.ServiceDesc{ ServiceName: "pdpb.PD", HandlerType: (*PDServer)(nil), @@ -2585,6 +4375,12 @@ var _PD_serviceDesc = grpc.ServiceDesc{ ServerStreams: true, ClientStreams: true, }, + { + StreamName: "SyncRegions", + Handler: _PD_SyncRegions_Handler, + ServerStreams: true, + ClientStreams: true, + }, }, Metadata: "pdpb.proto", } @@ -2609,6 +4405,9 @@ func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.ClusterId)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -2642,6 +4441,9 @@ func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) { } i += n1 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -2671,6 +4473,9 @@ func (m *Error) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintPdpb(dAtA, i, uint64(len(m.Message))) i += copy(dAtA[i:], m.Message) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -2704,6 +4509,9 @@ func (m *TsoRequest) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.Count)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } 
return i, nil } @@ -2732,6 +4540,9 @@ func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.Logical)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -2775,6 +4586,9 @@ func (m *TsoResponse) MarshalTo(dAtA []byte) (int, error) { } i += n4 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -2823,6 +4637,9 @@ func (m *BootstrapRequest) MarshalTo(dAtA []byte) (int, error) { } i += n7 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -2851,6 +4668,9 @@ func (m *BootstrapResponse) MarshalTo(dAtA []byte) (int, error) { } i += n8 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -2879,6 +4699,9 @@ func (m *IsBootstrappedRequest) MarshalTo(dAtA []byte) (int, error) { } i += n9 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -2917,6 +4740,9 @@ func (m *IsBootstrappedResponse) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -2945,6 +4771,9 @@ func (m *AllocIDRequest) MarshalTo(dAtA []byte) (int, error) { } i += n11 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -2978,6 +4807,9 @@ func (m *AllocIDResponse) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.Id)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3011,6 +4843,9 @@ func (m *GetStoreRequest) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.StoreId)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3049,6 +4884,9 @@ func (m *GetStoreResponse) MarshalTo(dAtA []byte) (int, error) { } i += n15 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3087,6 +4925,9 @@ func (m *PutStoreRequest) MarshalTo(dAtA []byte) (int, error) { } i += n17 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3115,6 +4956,9 @@ func (m *PutStoreResponse) MarshalTo(dAtA []byte) (int, error) { } i += n18 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3143,6 +4987,9 @@ func (m *GetAllStoresRequest) MarshalTo(dAtA []byte) (int, error) { } i += n19 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3183,6 +5030,9 @@ func (m *GetAllStoresResponse) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3217,6 +5067,9 @@ func (m *GetRegionRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintPdpb(dAtA, i, uint64(len(m.RegionKey))) i += copy(dAtA[i:], m.RegionKey) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3265,6 +5118,9 @@ func (m *GetRegionResponse) MarshalTo(dAtA []byte) (int, error) { } i += n24 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3298,6 +5154,9 @@ func (m *GetRegionByIDRequest) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.RegionId)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } 
return i, nil } @@ -3326,6 +5185,9 @@ func (m *GetClusterConfigRequest) MarshalTo(dAtA []byte) (int, error) { } i += n26 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3364,6 +5226,9 @@ func (m *GetClusterConfigResponse) MarshalTo(dAtA []byte) (int, error) { } i += n28 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3402,6 +5267,9 @@ func (m *PutClusterConfigRequest) MarshalTo(dAtA []byte) (int, error) { } i += n30 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3430,6 +5298,9 @@ func (m *PutClusterConfigResponse) MarshalTo(dAtA []byte) (int, error) { } i += n31 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3494,6 +5365,9 @@ func (m *Member) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.LeaderPriority)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3522,6 +5396,9 @@ func (m *GetMembersRequest) MarshalTo(dAtA []byte) (int, error) { } i += n32 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3582,6 +5459,9 @@ func (m *GetMembersResponse) MarshalTo(dAtA []byte) (int, error) { } i += n35 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3615,6 +5495,9 @@ func (m *PeerStats) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.DownSeconds)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3727,6 +5610,9 @@ func (m *RegionHeartbeatRequest) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.ApproximateKeys)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3760,6 +5646,9 @@ func (m *ChangePeer) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.ChangeType)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3788,6 +5677,9 @@ func (m *TransferLeader) MarshalTo(dAtA []byte) (int, error) { } i += n42 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3816,6 +5708,9 @@ func (m *Merge) MarshalTo(dAtA []byte) (int, error) { } i += n43 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3839,6 +5734,9 @@ func (m *SplitRegion) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.Policy)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3932,6 +5830,9 @@ func (m *RegionHeartbeatResponse) MarshalTo(dAtA []byte) (int, error) { } i += n50 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -3970,6 +5871,9 @@ func (m *AskSplitRequest) MarshalTo(dAtA []byte) (int, error) { } i += n52 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4020,6 +5924,9 @@ func (m *AskSplitResponse) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintPdpb(dAtA, i, uint64(j54)) i += copy(dAtA[i:], dAtA55[:j54]) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4068,6 +5975,9 @@ func (m *ReportSplitRequest) MarshalTo(dAtA []byte) (int, error) { } i += n58 } + if 
m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4096,6 +6006,9 @@ func (m *ReportSplitResponse) MarshalTo(dAtA []byte) (int, error) { } i += n59 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4139,6 +6052,9 @@ func (m *AskBatchSplitRequest) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.SplitCount)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4179,6 +6095,9 @@ func (m *SplitID) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintPdpb(dAtA, i, uint64(j62)) i += copy(dAtA[i:], dAtA63[:j62]) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4219,6 +6138,9 @@ func (m *AskBatchSplitResponse) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4259,6 +6181,9 @@ func (m *ReportBatchSplitRequest) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4287,6 +6212,9 @@ func (m *ReportBatchSplitResponse) MarshalTo(dAtA []byte) (int, error) { } i += n66 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4315,6 +6243,9 @@ func (m *TimeInterval) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.EndTimestamp)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4418,6 +6349,9 @@ func (m *StoreStats) MarshalTo(dAtA []byte) (int, error) { } i += n67 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4456,6 +6390,9 @@ func (m *StoreHeartbeatRequest) MarshalTo(dAtA []byte) (int, error) { } i += n69 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4484,6 +6421,9 @@ func (m *StoreHeartbeatResponse) MarshalTo(dAtA []byte) (int, error) { } i += n70 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4537,6 +6477,9 @@ func (m *ScatterRegionRequest) MarshalTo(dAtA []byte) (int, error) { } i += n73 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4565,6 +6508,9 @@ func (m *ScatterRegionResponse) MarshalTo(dAtA []byte) (int, error) { } i += n74 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4593,6 +6539,9 @@ func (m *GetGCSafePointRequest) MarshalTo(dAtA []byte) (int, error) { } i += n75 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4626,6 +6575,9 @@ func (m *GetGCSafePointResponse) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.SafePoint)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4659,6 +6611,9 @@ func (m *UpdateGCSafePointRequest) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.SafePoint)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -4692,27 +6647,96 @@ func (m *UpdateGCSafePointResponse) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.NewSafePoint)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil 
} -func encodeFixed64Pdpb(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Pdpb(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 +func (m *SyncRegionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SyncRegionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size())) + n79, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n79 + } + if m.Member != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintPdpb(dAtA, i, uint64(m.Member.Size())) + n80, err := m.Member.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n80 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SyncRegionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SyncRegionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size())) + n81, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n81 + } + if len(m.Regions) > 0 { + for _, msg := range m.Regions { + dAtA[i] = 0x12 + i++ + i = encodeVarintPdpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } + func encodeVarintPdpb(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -4723,15 +6747,24 @@ func encodeVarintPdpb(dAtA []byte, offset int, v uint64) int { return offset + 1 } func (m *RequestHeader) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ClusterId != 0 { n += 1 + sovPdpb(uint64(m.ClusterId)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ResponseHeader) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ClusterId != 0 { @@ -4741,10 +6774,16 @@ func (m *ResponseHeader) Size() (n int) { l = m.Error.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *Error) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Type != 0 { @@ -4754,10 +6793,16 @@ func (m *Error) Size() (n int) { if l > 0 { n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *TsoRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -4767,10 +6812,16 @@ func (m *TsoRequest) Size() (n int) { if m.Count != 0 { n += 1 + sovPdpb(uint64(m.Count)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } 
return n } func (m *Timestamp) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Physical != 0 { @@ -4779,10 +6830,16 @@ func (m *Timestamp) Size() (n int) { if m.Logical != 0 { n += 1 + sovPdpb(uint64(m.Logical)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *TsoResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -4796,10 +6853,16 @@ func (m *TsoResponse) Size() (n int) { l = m.Timestamp.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *BootstrapRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -4814,30 +6877,48 @@ func (m *BootstrapRequest) Size() (n int) { l = m.Region.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *BootstrapResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *IsBootstrappedRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *IsBootstrappedResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -4847,20 +6928,32 @@ func (m *IsBootstrappedResponse) Size() (n int) { if m.Bootstrapped { n += 2 } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *AllocIDRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *AllocIDResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -4870,10 +6963,16 @@ func (m *AllocIDResponse) Size() (n int) { if m.Id != 0 { n += 1 + sovPdpb(uint64(m.Id)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *GetStoreRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -4883,10 +6982,16 @@ func (m *GetStoreRequest) Size() (n int) { if m.StoreId != 0 { n += 1 + sovPdpb(uint64(m.StoreId)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *GetStoreResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -4897,10 +7002,16 @@ func (m *GetStoreResponse) Size() (n int) { l = m.Store.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *PutStoreRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -4911,30 +7022,48 @@ func (m *PutStoreRequest) Size() (n int) { l = m.Store.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *PutStoreResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *GetAllStoresRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l 
if m.Header != nil { l = m.Header.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *GetAllStoresResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -4947,10 +7076,16 @@ func (m *GetAllStoresResponse) Size() (n int) { n += 1 + l + sovPdpb(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *GetRegionRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -4961,10 +7096,16 @@ func (m *GetRegionRequest) Size() (n int) { if l > 0 { n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *GetRegionResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -4979,10 +7120,16 @@ func (m *GetRegionResponse) Size() (n int) { l = m.Leader.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *GetRegionByIDRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -4992,20 +7139,32 @@ func (m *GetRegionByIDRequest) Size() (n int) { if m.RegionId != 0 { n += 1 + sovPdpb(uint64(m.RegionId)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *GetClusterConfigRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *GetClusterConfigResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5016,10 +7175,16 @@ func (m *GetClusterConfigResponse) Size() (n int) { l = m.Cluster.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *PutClusterConfigRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5030,20 +7195,32 @@ func (m *PutClusterConfigRequest) Size() (n int) { l = m.Cluster.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *PutClusterConfigResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *Member) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -5068,20 +7245,32 @@ func (m *Member) Size() (n int) { if m.LeaderPriority != 0 { n += 1 + sovPdpb(uint64(m.LeaderPriority)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *GetMembersRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *GetMembersResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5102,10 +7291,16 @@ func (m *GetMembersResponse) Size() (n int) { l = m.EtcdLeader.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *PeerStats) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Peer != nil { @@ -5115,10 
+7310,16 @@ func (m *PeerStats) Size() (n int) { if m.DownSeconds != 0 { n += 1 + sovPdpb(uint64(m.DownSeconds)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *RegionHeartbeatRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5167,10 +7368,16 @@ func (m *RegionHeartbeatRequest) Size() (n int) { if m.ApproximateKeys != 0 { n += 1 + sovPdpb(uint64(m.ApproximateKeys)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ChangePeer) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Peer != nil { @@ -5180,39 +7387,63 @@ func (m *ChangePeer) Size() (n int) { if m.ChangeType != 0 { n += 1 + sovPdpb(uint64(m.ChangeType)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *TransferLeader) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Peer != nil { l = m.Peer.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *Merge) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Target != nil { l = m.Target.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *SplitRegion) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Policy != 0 { n += 1 + sovPdpb(uint64(m.Policy)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *RegionHeartbeatResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5246,10 +7477,16 @@ func (m *RegionHeartbeatResponse) Size() (n int) { l = m.SplitRegion.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *AskSplitRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5260,10 +7497,16 @@ func (m *AskSplitRequest) Size() (n int) { l = m.Region.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *AskSplitResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5280,10 +7523,16 @@ func (m *AskSplitResponse) Size() (n int) { } n += 1 + sovPdpb(uint64(l)) + l } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ReportSplitRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5298,20 +7547,32 @@ func (m *ReportSplitRequest) Size() (n int) { l = m.Right.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ReportSplitResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *AskBatchSplitRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5325,10 +7586,16 @@ func (m *AskBatchSplitRequest) Size() (n int) { if m.SplitCount != 0 { n += 1 + sovPdpb(uint64(m.SplitCount)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *SplitID) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.NewRegionId != 0 { @@ -5341,10 +7608,16 @@ func (m *SplitID) Size() (n int) { } n += 1 + 
sovPdpb(uint64(l)) + l } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *AskBatchSplitResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5357,10 +7630,16 @@ func (m *AskBatchSplitResponse) Size() (n int) { n += 1 + l + sovPdpb(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ReportBatchSplitRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5373,20 +7652,32 @@ func (m *ReportBatchSplitRequest) Size() (n int) { n += 1 + l + sovPdpb(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ReportBatchSplitResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *TimeInterval) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.StartTimestamp != 0 { @@ -5395,10 +7686,16 @@ func (m *TimeInterval) Size() (n int) { if m.EndTimestamp != 0 { n += 1 + sovPdpb(uint64(m.EndTimestamp)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *StoreStats) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.StoreId != 0 { @@ -5447,10 +7744,16 @@ func (m *StoreStats) Size() (n int) { l = m.Interval.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *StoreHeartbeatRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5461,20 +7764,32 @@ func (m *StoreHeartbeatRequest) Size() (n int) { l = m.Stats.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *StoreHeartbeatResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ScatterRegionRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5492,30 +7807,48 @@ func (m *ScatterRegionRequest) Size() (n int) { l = m.Leader.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ScatterRegionResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *GetGCSafePointRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovPdpb(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *GetGCSafePointResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5525,10 +7858,16 @@ func (m *GetGCSafePointResponse) Size() (n int) { if m.SafePoint != 0 { n += 1 + sovPdpb(uint64(m.SafePoint)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *UpdateGCSafePointRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5538,10 +7877,16 @@ func (m *UpdateGCSafePointRequest) Size() (n int) { if m.SafePoint != 0 { n += 1 + 
sovPdpb(uint64(m.SafePoint)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *UpdateGCSafePointResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Header != nil { @@ -5551,6 +7896,51 @@ func (m *UpdateGCSafePointResponse) Size() (n int) { if m.NewSafePoint != 0 { n += 1 + sovPdpb(uint64(m.NewSafePoint)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SyncRegionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovPdpb(uint64(l)) + } + if m.Member != nil { + l = m.Member.Size() + n += 1 + l + sovPdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SyncRegionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovPdpb(uint64(l)) + } + if len(m.Regions) > 0 { + for _, e := range m.Regions { + l = e.Size() + n += 1 + l + sovPdpb(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -5627,6 +8017,7 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -5729,6 +8120,7 @@ func (m *ResponseHeader) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -5827,6 +8219,7 @@ func (m *Error) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -5929,6 +8322,7 @@ func (m *TsoRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -6017,6 +8411,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -6152,6 +8547,7 @@ func (m *TsoResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -6301,6 +8697,7 @@ func (m *BootstrapRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -6384,6 +8781,7 @@ func (m *BootstrapResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -6467,6 +8865,7 @@ func (m *IsBootstrappedRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -6570,6 +8969,7 @@ func (m *IsBootstrappedResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -6653,6 +9053,7 @@ func (m *AllocIDRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -6755,6 +9156,7 @@ func (m *AllocIDResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -6857,6 +9259,7 @@ func (m *GetStoreRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -6973,6 +9376,7 @@ func (m *GetStoreResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -7089,6 +9493,7 @@ func (m *PutStoreRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -7172,6 +9577,7 @@ func (m *PutStoreResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -7255,6 +9661,7 @@ func (m *GetAllStoresRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -7369,6 +9776,7 @@ func (m *GetAllStoresResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -7483,6 +9891,7 @@ func (m *GetRegionRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -7632,6 +10041,7 @@ func (m *GetRegionResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -7734,6 +10144,7 @@ func (m *GetRegionByIDRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -7817,6 +10228,7 @@ func (m *GetClusterConfigRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -7933,6 +10345,7 @@ func (m *GetClusterConfigResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -8049,6 +10462,7 @@ func (m *PutClusterConfigRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -8132,6 +10546,7 @@ func (m *PutClusterConfigResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -8307,6 +10722,7 @@ func (m *Member) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -8390,6 +10806,7 @@ func (m *GetMembersRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -8570,6 +10987,7 @@ func (m *GetMembersResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -8672,6 +11090,7 @@ func (m *PeerStats) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -9030,6 +11449,7 @@ func (m *RegionHeartbeatRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -9132,6 +11552,7 @@ func (m *ChangePeer) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -9215,6 +11636,7 @@ func (m *TransferLeader) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -9298,6 +11720,7 @@ func (m *Merge) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -9367,6 +11790,7 @@ func (m *SplitRegion) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -9667,6 +12091,7 @@ func (m *RegionHeartbeatResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -9783,6 +12208,7 @@ func (m *AskSplitRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -9874,7 +12300,24 @@ func (m *AskSplitResponse) Unmarshal(dAtA []byte) error { } } case 3: - if wireType == 2 { + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NewPeerIds = append(m.NewPeerIds, v) + } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { @@ -9910,28 +12353,11 @@ func (m *AskSplitResponse) Unmarshal(dAtA []byte) error { iNdEx++ v |= (uint64(b) & 0x7F) << shift if b < 0x80 { - break - } - } - m.NewPeerIds = append(m.NewPeerIds, v) - } - } else if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPdpb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + break + } } + m.NewPeerIds = append(m.NewPeerIds, v) } - m.NewPeerIds = append(m.NewPeerIds, v) } else { return fmt.Errorf("proto: wrong wireType = %d for field NewPeerIds", wireType) } @@ -9947,6 +12373,7 @@ func (m *AskSplitResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -10096,6 +12523,7 @@ func (m *ReportSplitRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -10179,6 +12607,7 @@ func (m *ReportSplitResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -10314,6 +12743,7 @@ func (m *AskBatchSplitRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -10372,7 +12802,24 @@ func (m *SplitID) Unmarshal(dAtA []byte) error { } } case 2: - if wireType == 2 { + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NewPeerIds = append(m.NewPeerIds, v) + } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { @@ -10413,23 +12860,6 @@ func (m *SplitID) Unmarshal(dAtA []byte) error { } m.NewPeerIds = append(m.NewPeerIds, v) } - } else if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPdpb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.NewPeerIds = append(m.NewPeerIds, v) } else { return fmt.Errorf("proto: wrong wireType = %d for field NewPeerIds", wireType) } @@ -10445,6 +12875,7 @@ func (m *SplitID) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -10559,6 +12990,7 @@ func (m *AskBatchSplitResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -10673,6 +13105,7 @@ func (m *ReportBatchSplitRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -10756,6 +13189,7 @@ func (m *ReportBatchSplitResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -10844,6 +13278,7 @@ func (m *TimeInterval) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -11194,6 +13629,7 @@ func (m *StoreStats) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -11310,6 +13746,7 @@ func (m *StoreHeartbeatRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -11393,6 +13830,7 @@ func (m *StoreHeartbeatResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -11561,6 +13999,7 @@ func (m *ScatterRegionRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -11644,6 +14083,7 @@ func (m *ScatterRegionResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -11727,6 +14167,7 @@ func (m *GetGCSafePointRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -11829,6 +14270,7 @@ func (m *GetGCSafePointResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -11931,6 +14373,7 @@ func (m *UpdateGCSafePointRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -12033,6 +14476,239 @@ func (m *UpdateGCSafePointResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SyncRegionRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowPdpb
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SyncRegionRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SyncRegionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowPdpb
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthPdpb
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &RequestHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowPdpb
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthPdpb
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Member == nil {
+				m.Member = &Member{}
+			}
+			if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipPdpb(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthPdpb
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SyncRegionResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowPdpb
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SyncRegionResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SyncRegionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowPdpb
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthPdpb
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Regions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowPdpb
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthPdpb
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Regions = append(m.Regions, &metapb.Region{})
+			if err := m.Regions[len(m.Regions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipPdpb(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthPdpb
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy } } @@ -12147,154 +14823,157 @@ var ( ErrIntOverflowPdpb = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("pdpb.proto", fileDescriptorPdpb) } - -var fileDescriptorPdpb = []byte{ - // 2328 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x5a, 0x5b, 0x6f, 0x1b, 0xc7, - 0x15, 0xd6, 0x52, 0x24, 0x45, 0x1e, 0x5e, 0x35, 0xd6, 0x85, 0xa6, 0x6f, 0xca, 0xd8, 0x6d, 0x65, - 0x37, 0x61, 0x1c, 0xb7, 0x28, 0x0c, 0x14, 0x09, 0x42, 0x5d, 0x2c, 0x33, 0xb6, 0x44, 0x62, 0x48, - 0x27, 0x0d, 0x50, 0x94, 0x5d, 0x91, 0x23, 0x6a, 0x2b, 0x6a, 0x77, 0xb3, 0x3b, 0x92, 0xcb, 0xbc, - 0xb4, 0x4f, 0x41, 0x81, 0x06, 0xe8, 0x5b, 0xd1, 0x97, 0x02, 0xfd, 0x05, 0xfd, 0x07, 0x45, 0x5f, - 0xfb, 0xd8, 0x9f, 0x50, 0xb8, 0x7f, 0xa4, 0x98, 0xdb, 0xde, 0x48, 0xd9, 0xee, 0x3a, 0x79, 0x32, - 0xf7, 0x9c, 0x33, 0xdf, 0xcc, 0x7c, 0xe7, 0x9c, 0x99, 0x33, 0x47, 0x06, 0x70, 0xc7, 0xee, 0x71, - 0xcb, 0xf5, 0x1c, 0xe6, 0xa0, 0x2c, 0xff, 0xdd, 0x2c, 0x9f, 0x53, 0x66, 0x6a, 0x59, 0xb3, 0x42, - 0x3d, 0xf3, 0x84, 0x05, 0x9f, 0x6b, 0x13, 0x67, 0xe2, 0x88, 0x9f, 0x1f, 0xf2, 0x5f, 0x52, 0x8a, - 0x5b, 0x50, 0x21, 0xf4, 0xab, 0x0b, 0xea, 0xb3, 0xa7, 0xd4, 0x1c, 0x53, 0x0f, 0xdd, 0x02, 0x18, - 0x4d, 0x2f, 0x7c, 0x46, 0xbd, 0xa1, 0x35, 0x6e, 0x18, 0x5b, 0xc6, 0x76, 0x96, 0x14, 0x95, 0xa4, - 0x33, 0xc6, 0x04, 0xaa, 0x84, 0xfa, 0xae, 0x63, 0xfb, 0xf4, 0xad, 0x06, 0xa0, 0xf7, 0x20, 0x47, - 0x3d, 0xcf, 0xf1, 0x1a, 0x99, 0x2d, 0x63, 0xbb, 0xf4, 0xa8, 0xd4, 0x12, 0xab, 0xde, 0xe7, 0x22, - 0x22, 0x35, 0xf8, 0x09, 0xe4, 0xc4, 0x37, 0xba, 0x0b, 0x59, 0x36, 0x73, 0xa9, 0x00, 0xa9, 0x3e, - 0xaa, 0x45, 0x4c, 0x07, 0x33, 0x97, 0x12, 0xa1, 0x44, 0x0d, 0x58, 0x39, 0xa7, 0xbe, 0x6f, 0x4e, - 0xa8, 0x80, 0x2c, 0x12, 0xfd, 0x89, 0xbb, 0x00, 0x03, 0xdf, 0x51, 0xdb, 0x41, 0x3f, 0x86, 0xfc, - 0xa9, 0x58, 0xa1, 0x80, 0x2b, 0x3d, 0xba, 0x26, 0xe1, 0x62, 0xbb, 0x25, 0xca, 0x04, 0xad, 0x41, - 0x6e, 0xe4, 0x5c, 0xd8, 0x4c, 0x40, 0x56, 0x88, 0xfc, 0xc0, 0x6d, 0x28, 0x0e, 0xac, 0x73, 0xea, - 0x33, 0xf3, 0xdc, 0x45, 0x4d, 0x28, 0xb8, 0xa7, 0x33, 0xdf, 0x1a, 0x99, 0x53, 0x81, 0xb8, 0x4c, - 0x82, 0x6f, 0xbe, 0xa6, 0xa9, 0x33, 0x11, 0xaa, 0x8c, 0x50, 0xe9, 0x4f, 0xfc, 0x7b, 0x03, 0x4a, - 0x62, 0x51, 0x92, 0x33, 0xf4, 0x7e, 0x62, 0x55, 0x6b, 0x7a, 0x55, 0x51, 0x4e, 0x5f, 0xbf, 0x2c, - 0xf4, 0x01, 0x14, 0x99, 0x5e, 0x56, 0x63, 0x59, 0xc0, 0x28, 0xae, 0x82, 0xd5, 0x92, 0xd0, 0x02, - 0x7f, 0x6b, 0x40, 0x7d, 0xc7, 0x71, 0x98, 0xcf, 0x3c, 0xd3, 0x4d, 0xc5, 0xce, 0x5d, 0xc8, 0xf9, - 0xcc, 0xf1, 0xa8, 0xf2, 0x61, 0xa5, 0xa5, 0xe2, 0xac, 0xcf, 0x85, 0x44, 0xea, 0xd0, 0x0f, 0x21, - 0xef, 0xd1, 0x89, 0xe5, 0xd8, 0x6a, 0x49, 0x55, 0x6d, 0x45, 0x84, 0x94, 0x28, 0x2d, 0x6e, 0xc3, - 0x6a, 0x64, 0x35, 0x69, 0x68, 0xc1, 0x7b, 0xb0, 0xde, 0xf1, 0x03, 0x10, 0x97, 0x8e, 0xd3, 0xec, - 0x0a, 0xff, 0x06, 0x36, 0x92, 0x28, 0xa9, 0x9c, 0x84, 0xa1, 0x7c, 0x1c, 0x41, 0x11, 0x24, 0x15, - 0x48, 0x4c, 0x86, 0x3f, 0x86, 0x6a, 0x7b, 0x3a, 0x75, 0x46, 0x9d, 0xbd, 0x54, 0x4b, 0xed, 0x42, - 0x2d, 0x18, 0x9e, 0x6a, 0x8d, 0x55, 0xc8, 0x58, 0x72, 0x65, 0x59, 0x92, 0xb1, 0xc6, 0xf8, 0x4b, - 0xa8, 0x1d, 0x50, 0x26, 0xfd, 0x97, 0x26, 0x22, 0xae, 0x43, 0x41, 0x78, 0x7d, 0x18, 0xa0, 0xae, - 0x88, 0xef, 0xce, 0x18, 0x53, 0xa8, 0x87, 0xd0, 0xa9, 0x16, 0xfb, 0x36, 0xe1, 0x86, 0x47, 0x50, - 0xeb, 0x5d, 0xbc, 0xc3, 0x0e, 0xde, 0x6a, 0x92, 0x4f, 0xa1, 0x1e, 0x4e, 0x92, 0x2a, 0x54, 0x77, - 0xe0, 0xda, 0x01, 0x65, 0xed, 0xe9, 0x54, 0x80, 0xf8, 0xa9, 0xbc, 0x7f, 0x06, 0x6b, 0x71, 0x8c, - 0x54, 0xac, 0xfe, 0x00, 0xf2, 0x62, 0x53, 0x7e, 
0x23, 0xb3, 0xb5, 0x3c, 0xbf, 0x63, 0xa5, 0xc4, - 0xbf, 0x12, 0xee, 0x53, 0x39, 0x9b, 0x86, 0xd8, 0x5b, 0x00, 0x32, 0xd3, 0x87, 0x67, 0x74, 0x26, - 0xd8, 0x2d, 0x93, 0xa2, 0x94, 0x3c, 0xa3, 0x33, 0xfc, 0x27, 0x03, 0x56, 0x23, 0x13, 0xa4, 0xda, - 0x4a, 0x78, 0xd4, 0x64, 0x5e, 0x77, 0xd4, 0xa0, 0x7b, 0x90, 0x9f, 0x4a, 0x54, 0x79, 0x24, 0x95, - 0xb5, 0x5d, 0x8f, 0x72, 0x34, 0xa9, 0xc3, 0xbf, 0x16, 0xf4, 0xca, 0xa1, 0x3b, 0xb3, 0x74, 0x19, - 0x8a, 0x6e, 0x80, 0xda, 0x63, 0x98, 0x11, 0x05, 0x29, 0xe8, 0x8c, 0xf1, 0x13, 0xd8, 0x3c, 0xa0, - 0x6c, 0x57, 0xde, 0x89, 0xbb, 0x8e, 0x7d, 0x62, 0x4d, 0x52, 0x05, 0x82, 0x0f, 0x8d, 0x79, 0x9c, - 0x54, 0x0c, 0xde, 0x87, 0x15, 0x75, 0x45, 0x2b, 0x0a, 0x6b, 0x9a, 0x1a, 0x85, 0x4e, 0xb4, 0x1e, - 0x7f, 0x05, 0x9b, 0xbd, 0x8b, 0x77, 0x5f, 0xfc, 0xff, 0x33, 0xe5, 0x53, 0x68, 0xcc, 0x4f, 0x99, - 0x2a, 0xfd, 0xfe, 0x66, 0x40, 0xfe, 0x90, 0x9e, 0x1f, 0x53, 0x0f, 0x21, 0xc8, 0xda, 0xe6, 0xb9, - 0x2c, 0x2e, 0x8a, 0x44, 0xfc, 0xe6, 0x5e, 0x3b, 0x17, 0xda, 0x88, 0xd7, 0xa4, 0xa0, 0x33, 0xe6, - 0x4a, 0x97, 0x52, 0x6f, 0x78, 0xe1, 0x4d, 0xfd, 0xc6, 0xf2, 0xd6, 0xf2, 0x76, 0x91, 0x14, 0xb8, - 0xe0, 0x85, 0x37, 0xf5, 0xd1, 0x1d, 0x28, 0x8d, 0xa6, 0x16, 0xb5, 0x99, 0x54, 0x67, 0x85, 0x1a, - 0xa4, 0x48, 0x18, 0xfc, 0x08, 0x6a, 0x32, 0xbe, 0x86, 0xae, 0x67, 0x39, 0x9e, 0xc5, 0x66, 0x8d, - 0xdc, 0x96, 0xb1, 0x9d, 0x23, 0x55, 0x29, 0xee, 0x29, 0x29, 0xfe, 0x54, 0xe4, 0x83, 0x5c, 0x64, - 0xba, 0xf3, 0xe1, 0x9f, 0x06, 0xa0, 0x28, 0x44, 0xca, 0x9c, 0x5a, 0x91, 0x3b, 0xd7, 0xe7, 0x43, - 0x59, 0x9a, 0x4b, 0x54, 0xa2, 0x95, 0x0b, 0x72, 0x2a, 0x6a, 0xa6, 0x74, 0xe8, 0x03, 0x28, 0x51, - 0x36, 0x1a, 0x0f, 0x95, 0x69, 0x76, 0x81, 0x29, 0x70, 0x83, 0xe7, 0x72, 0x07, 0x3d, 0x28, 0xf2, - 0x94, 0xec, 0x33, 0x93, 0xf9, 0x68, 0x0b, 0xb2, 0x9c, 0x66, 0xb5, 0xea, 0x78, 0xce, 0x0a, 0x0d, - 0x7a, 0x0f, 0xca, 0x63, 0xe7, 0xa5, 0x3d, 0xf4, 0xe9, 0xc8, 0xb1, 0xc7, 0xbe, 0xf2, 0x5c, 0x89, - 0xcb, 0xfa, 0x52, 0x84, 0xbf, 0xc9, 0xc2, 0x86, 0x4c, 0xe9, 0xa7, 0xd4, 0xf4, 0xd8, 0x31, 0x35, - 0x59, 0xaa, 0xa8, 0xfd, 0x4e, 0x8f, 0x1a, 0xd4, 0x02, 0x10, 0x0b, 0xe7, 0xbb, 0x90, 0x41, 0x13, - 0x94, 0x6e, 0xc1, 0xfe, 0x49, 0x91, 0x9b, 0xf0, 0x4f, 0x1f, 0x7d, 0x04, 0x15, 0x97, 0xda, 0x63, - 0xcb, 0x9e, 0xa8, 0x21, 0x39, 0xe5, 0x9a, 0x28, 0x78, 0x59, 0x99, 0xc8, 0x21, 0x77, 0xa1, 0x72, - 0x3c, 0x63, 0xd4, 0x1f, 0xbe, 0xf4, 0x2c, 0xc6, 0xa8, 0xdd, 0xc8, 0x0b, 0x72, 0xca, 0x42, 0xf8, - 0x85, 0x94, 0xf1, 0x33, 0x5a, 0x1a, 0x79, 0xd4, 0x1c, 0x37, 0x56, 0x64, 0xcd, 0x2e, 0x24, 0x84, - 0x9a, 0xbc, 0x66, 0x2f, 0x9f, 0xd1, 0x59, 0x08, 0x51, 0x90, 0xfc, 0x72, 0x99, 0x46, 0xb8, 0x01, - 0x45, 0x61, 0x22, 0x00, 0x8a, 0x32, 0x73, 0xb8, 0x40, 0x8c, 0xbf, 0x0f, 0x75, 0xd3, 0x75, 0x3d, - 0xe7, 0xb7, 0xd6, 0xb9, 0xc9, 0xe8, 0xd0, 0xb7, 0xbe, 0xa6, 0x0d, 0x10, 0x36, 0xb5, 0x88, 0xbc, - 0x6f, 0x7d, 0x4d, 0x51, 0x0b, 0x0a, 0x96, 0xcd, 0xa8, 0x77, 0x69, 0x4e, 0x1b, 0x65, 0xc1, 0x1c, - 0x0a, 0x4b, 0xd9, 0x8e, 0xd2, 0x90, 0xc0, 0x26, 0x09, 0xcd, 0xa7, 0x6c, 0x54, 0xe6, 0xa0, 0x9f, - 0xd1, 0x99, 0xff, 0x59, 0xb6, 0x50, 0xaa, 0x97, 0xf1, 0x29, 0xc0, 0xee, 0xa9, 0x69, 0x4f, 0x28, - 0xa7, 0xe7, 0x2d, 0x62, 0xeb, 0x31, 0x94, 0x46, 0xc2, 0x7e, 0x28, 0x9e, 0x22, 0x19, 0xf1, 0x14, - 0xd9, 0x6c, 0xe9, 0xb7, 0x14, 0x3f, 0x8d, 0x24, 0x9e, 0x78, 0x92, 0xc0, 0x28, 0xf8, 0x8d, 0x1f, - 0x41, 0x75, 0xe0, 0x99, 0xb6, 0x7f, 0x42, 0x3d, 0x19, 0xd6, 0x6f, 0x9e, 0x0d, 0x7f, 0x08, 0xb9, - 0x43, 0xea, 0x4d, 0x44, 0xf5, 0xcc, 0x4c, 0x6f, 0x42, 0x99, 0x32, 0x9e, 0x8b, 0x33, 0xa9, 0xc5, - 0x8f, 0xa1, 0xd4, 0x77, 0xa7, 0x96, 0xba, 0xae, 0xd0, 0x7d, 0xc8, 0xbb, 
0xce, 0xd4, 0x1a, 0xcd, - 0xd4, 0x9b, 0x69, 0x55, 0x92, 0xb7, 0x7b, 0x4a, 0x47, 0x67, 0x3d, 0xa1, 0x20, 0xca, 0x00, 0xff, - 0x79, 0x19, 0x36, 0xe7, 0x32, 0x22, 0xd5, 0x51, 0xf1, 0x51, 0x40, 0x91, 0xd8, 0x9d, 0x4c, 0x8c, - 0xba, 0x9e, 0x59, 0x73, 0xad, 0xb9, 0x11, 0xbc, 0x7f, 0x0c, 0x35, 0xa6, 0xb8, 0x19, 0xc6, 0xf2, - 0x44, 0xcd, 0x14, 0x27, 0x8e, 0x54, 0x59, 0x9c, 0xc8, 0xd8, 0xed, 0x9a, 0x8d, 0xdf, 0xae, 0xe8, - 0x67, 0x50, 0x56, 0x4a, 0xea, 0x3a, 0xa3, 0x53, 0x71, 0xcc, 0xf2, 0xac, 0x8e, 0x11, 0xb8, 0xcf, - 0x55, 0xa4, 0xe4, 0x85, 0x1f, 0xfc, 0x8c, 0x92, 0xa4, 0xca, 0x6d, 0xe4, 0x17, 0x38, 0x09, 0xa4, - 0x41, 0x4f, 0x1e, 0x3a, 0xb9, 0x73, 0xee, 0x2a, 0x91, 0x2e, 0xc1, 0x43, 0x56, 0x78, 0x8f, 0x48, - 0x0d, 0xfa, 0x29, 0x94, 0x7d, 0xee, 0x9c, 0xa1, 0x3a, 0x32, 0x0a, 0xc2, 0x52, 0xf9, 0x24, 0xe2, - 0x36, 0x52, 0xf2, 0xc3, 0x0f, 0x7c, 0x02, 0xb5, 0xb6, 0x7f, 0xa6, 0xd4, 0xdf, 0xdf, 0x11, 0x85, - 0xbf, 0x31, 0xa0, 0x1e, 0x4e, 0x94, 0xf2, 0xa9, 0x53, 0xb1, 0xe9, 0xcb, 0x61, 0xb2, 0xd2, 0x29, - 0xd9, 0xf4, 0x25, 0xd1, 0xee, 0xd8, 0x82, 0x32, 0xb7, 0x11, 0x57, 0xa7, 0x35, 0x96, 0x37, 0x67, - 0x96, 0x80, 0x4d, 0x5f, 0x72, 0x1a, 0x3b, 0x63, 0x1f, 0xff, 0xd1, 0x00, 0x44, 0xa8, 0xeb, 0x78, - 0x2c, 0xfd, 0xa6, 0x31, 0x64, 0xa7, 0xf4, 0x84, 0x5d, 0xb1, 0x65, 0xa1, 0x43, 0xf7, 0x20, 0xe7, - 0x59, 0x93, 0x53, 0x76, 0xc5, 0x83, 0x54, 0x2a, 0xf1, 0x2e, 0x5c, 0x8b, 0x2d, 0x26, 0x55, 0x9d, - 0xf1, 0xad, 0x01, 0x6b, 0x6d, 0xff, 0x6c, 0xc7, 0x64, 0xa3, 0xd3, 0xef, 0xdd, 0x93, 0xbc, 0xf8, - 0x90, 0x71, 0x26, 0x9b, 0x03, 0xcb, 0xa2, 0x39, 0x00, 0x42, 0xb4, 0x2b, 0x1a, 0x17, 0x5d, 0x58, - 0x11, 0xab, 0xe8, 0xec, 0xcd, 0xbb, 0xcc, 0x78, 0xb3, 0xcb, 0x32, 0x73, 0x2e, 0x3b, 0x81, 0xf5, - 0xc4, 0xf6, 0x52, 0xc5, 0xcf, 0x1d, 0x58, 0xd6, 0xf8, 0xfc, 0x01, 0x12, 0xe6, 0x45, 0x67, 0x8f, - 0x70, 0x0d, 0x76, 0xf9, 0x19, 0xc5, 0x9d, 0xf1, 0x8e, 0x4c, 0x6e, 0xc3, 0x8a, 0xdc, 0xb1, 0x9e, - 0x2c, 0x49, 0xa5, 0x56, 0xf3, 0x5a, 0x73, 0x7e, 0xc6, 0x54, 0x31, 0xf0, 0x4b, 0x28, 0x47, 0x2f, - 0x2d, 0x5e, 0x01, 0xfa, 0xcc, 0xf4, 0xd8, 0x30, 0x6c, 0xd6, 0x48, 0xee, 0xab, 0x42, 0x1c, 0x76, - 0x96, 0xee, 0x42, 0x85, 0xda, 0xe3, 0x88, 0x99, 0xcc, 0xaa, 0x32, 0xb5, 0xc7, 0x81, 0x11, 0xfe, - 0x6b, 0x16, 0x40, 0xbc, 0xd4, 0x64, 0x91, 0x14, 0x7d, 0x80, 0x1b, 0xb1, 0x07, 0x38, 0x6a, 0x42, - 0x61, 0x64, 0xba, 0xe6, 0x88, 0x97, 0x9c, 0xaa, 0xa6, 0xd5, 0xdf, 0xe8, 0x26, 0x14, 0xcd, 0x4b, - 0xd3, 0x9a, 0x9a, 0xc7, 0x53, 0x2a, 0xe2, 0x26, 0x4b, 0x42, 0x01, 0xbf, 0xf7, 0x55, 0x9c, 0xc8, - 0xc0, 0xca, 0x8a, 0xc0, 0x52, 0x87, 0xa6, 0x88, 0x2c, 0xf4, 0x3e, 0x20, 0x5f, 0x55, 0x24, 0xbe, - 0x6d, 0xba, 0xca, 0x30, 0x27, 0x0c, 0xeb, 0x4a, 0xd3, 0xb7, 0x4d, 0x57, 0x5a, 0x3f, 0x84, 0x35, - 0x8f, 0x8e, 0xa8, 0x75, 0x99, 0xb0, 0xcf, 0x0b, 0x7b, 0x14, 0xe8, 0xc2, 0x11, 0xb7, 0x00, 0x42, - 0xd2, 0xc4, 0x51, 0x5b, 0x21, 0xc5, 0x80, 0x2f, 0xd4, 0x82, 0x6b, 0xa6, 0xeb, 0x4e, 0x67, 0x09, - 0xbc, 0x82, 0xb0, 0x5b, 0xd5, 0xaa, 0x10, 0x6e, 0x13, 0x56, 0x2c, 0x7f, 0x78, 0x7c, 0xe1, 0xcf, - 0x44, 0x91, 0x52, 0x20, 0x79, 0xcb, 0xdf, 0xb9, 0xf0, 0x67, 0xfc, 0x46, 0xb9, 0xf0, 0xe9, 0x38, - 0x5a, 0x9b, 0x14, 0xb8, 0x40, 0x14, 0x25, 0x73, 0x35, 0x54, 0x69, 0x41, 0x0d, 0x95, 0x2c, 0x92, - 0xca, 0xf3, 0x45, 0x52, 0xbc, 0xcc, 0xaa, 0x24, 0xcb, 0xac, 0x58, 0x0d, 0x55, 0x4d, 0xd4, 0x50, - 0xd1, 0xc2, 0xa8, 0xf6, 0xe6, 0xc2, 0x08, 0x4f, 0x61, 0x5d, 0x84, 0xc7, 0xbb, 0x96, 0xbb, 0x39, - 0x9f, 0xc7, 0x57, 0xfc, 0x52, 0x0f, 0xe3, 0x8e, 0x48, 0x35, 0x7e, 0x02, 0x1b, 0xc9, 0xd9, 0x52, - 0xe5, 0xcc, 0xdf, 0x0d, 0x58, 0xeb, 0x8f, 0x4c, 0xc6, 0x9f, 0x7f, 0xe9, 0x5b, 0x0e, 0xaf, 0x7b, - 
0x7c, 0xbf, 0x6d, 0x5f, 0x32, 0x52, 0xc1, 0x67, 0x5f, 0xd3, 0x2c, 0xd8, 0x87, 0xf5, 0xc4, 0x7a, - 0xd3, 0x76, 0x30, 0x0f, 0x28, 0x3b, 0xd8, 0xed, 0x9b, 0x27, 0xb4, 0xe7, 0x58, 0x76, 0x2a, 0x6f, - 0x61, 0x0a, 0x1b, 0x49, 0x94, 0x54, 0xc7, 0x32, 0x4f, 0x3a, 0xf3, 0x84, 0x0e, 0x5d, 0x8e, 0xa1, - 0x08, 0x2c, 0xfa, 0x1a, 0x14, 0x9f, 0x40, 0xe3, 0x85, 0x3b, 0x36, 0x19, 0x7d, 0xc7, 0xf5, 0xbe, - 0x69, 0x1e, 0x07, 0xae, 0x2f, 0x98, 0x27, 0xd5, 0x8e, 0xee, 0x41, 0x95, 0xdf, 0x68, 0x73, 0xb3, - 0xf1, 0x7b, 0x2e, 0xc0, 0x7e, 0xf0, 0x3b, 0x28, 0x06, 0x7f, 0x5d, 0x40, 0x79, 0xc8, 0x74, 0x9f, - 0xd5, 0x97, 0x50, 0x09, 0x56, 0x5e, 0x1c, 0x3d, 0x3b, 0xea, 0x7e, 0x71, 0x54, 0x37, 0xd0, 0x1a, - 0xd4, 0x8f, 0xba, 0x83, 0xe1, 0x4e, 0xb7, 0x3b, 0xe8, 0x0f, 0x48, 0xbb, 0xd7, 0xdb, 0xdf, 0xab, - 0x67, 0xd0, 0x35, 0xa8, 0xf5, 0x07, 0x5d, 0xb2, 0x3f, 0x1c, 0x74, 0x0f, 0x77, 0xfa, 0x83, 0xee, - 0xd1, 0x7e, 0x7d, 0x19, 0x35, 0x60, 0xad, 0xfd, 0x9c, 0xec, 0xb7, 0xf7, 0xbe, 0x8c, 0x9b, 0x67, - 0xb9, 0xa6, 0x73, 0xb4, 0xdb, 0x3d, 0xec, 0xb5, 0x07, 0x9d, 0x9d, 0xe7, 0xfb, 0xc3, 0xcf, 0xf7, - 0x49, 0xbf, 0xd3, 0x3d, 0xaa, 0xe7, 0x1e, 0x6c, 0x43, 0x29, 0x52, 0xaa, 0xa3, 0x02, 0x64, 0xfb, - 0xbb, 0xed, 0xa3, 0xfa, 0x12, 0xaa, 0x41, 0xa9, 0xdd, 0xeb, 0x91, 0xee, 0x2f, 0x3a, 0x87, 0xed, - 0xc1, 0x7e, 0xdd, 0x78, 0xf4, 0x8f, 0x32, 0x64, 0x7a, 0x7b, 0xa8, 0x0d, 0x10, 0xbe, 0xf4, 0xd1, - 0xa6, 0xe4, 0x60, 0xae, 0x7d, 0xd0, 0x6c, 0xcc, 0x2b, 0x24, 0x4d, 0x78, 0x09, 0x3d, 0x84, 0xe5, - 0x81, 0xef, 0x20, 0x95, 0xda, 0xe1, 0x1f, 0x4c, 0x9a, 0xab, 0x11, 0x89, 0xb6, 0xde, 0x36, 0x1e, - 0x1a, 0xe8, 0x13, 0x28, 0x06, 0x6d, 0x72, 0xb4, 0x21, 0xad, 0x92, 0x7f, 0x50, 0x68, 0x6e, 0xce, - 0xc9, 0x83, 0x19, 0x0f, 0xa1, 0x1a, 0x6f, 0xb4, 0xa3, 0x1b, 0xd2, 0x78, 0x61, 0x13, 0xbf, 0x79, - 0x73, 0xb1, 0x32, 0x80, 0x7b, 0x0c, 0x2b, 0xaa, 0x19, 0x8e, 0x54, 0x10, 0xc4, 0x5b, 0xeb, 0xcd, - 0xf5, 0x84, 0x34, 0x18, 0xf9, 0x73, 0x28, 0xe8, 0xd6, 0x34, 0x5a, 0x0f, 0x28, 0x8a, 0xf6, 0x90, - 0x9b, 0x1b, 0x49, 0x71, 0x74, 0xb0, 0xee, 0x05, 0xeb, 0xc1, 0x89, 0x06, 0xb4, 0x1e, 0x9c, 0x6c, - 0x19, 0xe3, 0x25, 0x74, 0x00, 0xe5, 0x68, 0x0b, 0x17, 0x5d, 0x0f, 0xa6, 0x49, 0xb6, 0x86, 0x9b, - 0xcd, 0x45, 0xaa, 0x28, 0x97, 0xf1, 0x83, 0x57, 0x73, 0xb9, 0xf0, 0xf0, 0xd7, 0x5c, 0x2e, 0x3e, - 0xab, 0xf1, 0x12, 0x1a, 0x40, 0x2d, 0xf1, 0x26, 0x44, 0x37, 0x75, 0x62, 0x2d, 0x6a, 0x9e, 0x34, - 0x6f, 0x5d, 0xa1, 0x4d, 0x06, 0x4c, 0xd0, 0x51, 0x45, 0x21, 0xa3, 0xb1, 0x13, 0xbe, 0xb9, 0x39, - 0x27, 0x0f, 0x56, 0xb5, 0x03, 0x95, 0x03, 0xca, 0x7a, 0x1e, 0xbd, 0x4c, 0x8f, 0xf1, 0x44, 0x60, - 0x84, 0x5d, 0x5d, 0xd4, 0x4c, 0xd8, 0x46, 0x5a, 0xbd, 0xaf, 0xc3, 0xf9, 0x04, 0x0a, 0xfa, 0xd1, - 0xa4, 0xdd, 0x9e, 0x78, 0xad, 0x69, 0xb7, 0x27, 0xdf, 0x56, 0x78, 0xf9, 0x0f, 0x19, 0x03, 0x1d, - 0x40, 0x29, 0xf2, 0xbc, 0x40, 0x0d, 0xcd, 0x5f, 0xf2, 0xf9, 0xd3, 0xbc, 0xbe, 0x40, 0x13, 0x05, - 0xfa, 0x0c, 0x2a, 0xb1, 0x12, 0x5c, 0x6f, 0x68, 0xd1, 0xb3, 0xa3, 0x79, 0x63, 0xa1, 0x2e, 0xd8, - 0x54, 0x1f, 0xea, 0xc9, 0xa2, 0x17, 0xdd, 0x8a, 0xce, 0x3f, 0x8f, 0x78, 0xfb, 0x2a, 0x75, 0x14, - 0x34, 0xd9, 0x9d, 0xd6, 0xa0, 0x57, 0x74, 0xbf, 0x35, 0xe8, 0x55, 0x4d, 0x6d, 0x09, 0x9a, 0x6c, - 0x05, 0x6b, 0xd0, 0x2b, 0xba, 0xd2, 0x1a, 0xf4, 0xaa, 0x0e, 0x32, 0x5e, 0xe2, 0x54, 0xc6, 0x2e, - 0x71, 0x4d, 0xe5, 0xa2, 0x4a, 0x44, 0x53, 0xb9, 0xf0, 0xd6, 0x97, 0x09, 0x19, 0xbf, 0x83, 0x75, - 0x42, 0x2e, 0xbc, 0xdf, 0x75, 0x42, 0x2e, 0xbe, 0xb6, 0xf1, 0x12, 0xfa, 0x1c, 0x56, 0xe7, 0xee, - 0x40, 0xa4, 0x76, 0x74, 0xd5, 0x25, 0xdc, 0xbc, 0x73, 0xa5, 0x5e, 0xe3, 0xee, 0x3c, 0xf8, 0xd7, - 0xab, 0xdb, 0xc6, 0xbf, 
0x5f, 0xdd, 0x36, 0xfe, 0xf3, 0xea, 0xb6, 0xf1, 0x97, 0xff, 0xde, 0x5e, - 0x82, 0xc6, 0xc8, 0x39, 0x6f, 0xb9, 0x96, 0x3d, 0x19, 0x99, 0x6e, 0x8b, 0x59, 0x67, 0x97, 0xad, - 0xb3, 0x4b, 0xf1, 0x7f, 0x02, 0x8e, 0xf3, 0xe2, 0x9f, 0x9f, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, - 0x64, 0x88, 0xec, 0x1f, 0x61, 0x20, 0x00, 0x00, +func init() { proto.RegisterFile("pdpb.proto", fileDescriptor_pdpb_3ac9e95c85c25438) } + +var fileDescriptor_pdpb_3ac9e95c85c25438 = []byte{ + // 2371 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x1a, 0xd9, 0x6e, 0x1b, 0xd7, + 0x55, 0xc3, 0x4d, 0xe4, 0xe1, 0xaa, 0x6b, 0x2d, 0x34, 0xbd, 0x29, 0xd7, 0x6e, 0x2b, 0xbb, 0x09, + 0xe3, 0xb8, 0x45, 0x61, 0xa0, 0x48, 0x10, 0x6a, 0xb1, 0xcc, 0xd8, 0x12, 0x89, 0x4b, 0x3a, 0x69, + 0x80, 0xa2, 0xec, 0x68, 0x78, 0x45, 0x4d, 0x45, 0xcd, 0x4c, 0x66, 0x46, 0x72, 0x99, 0x97, 0xf6, + 0xa5, 0x41, 0x81, 0x06, 0xe8, 0x5b, 0xd1, 0x97, 0x02, 0xfd, 0x82, 0xfe, 0x42, 0x5f, 0xfb, 0xd8, + 0x4f, 0x28, 0xdc, 0x1f, 0x29, 0xee, 0x36, 0x1b, 0x29, 0x5b, 0x1d, 0x25, 0x4f, 0xe2, 0x9c, 0x73, + 0xe6, 0xdc, 0xb3, 0xde, 0xb3, 0x8c, 0x00, 0x9c, 0xb1, 0x73, 0xd4, 0x76, 0x5c, 0xdb, 0xb7, 0x51, + 0x8e, 0xfd, 0x6e, 0x55, 0xce, 0xa8, 0xaf, 0x2b, 0x58, 0xab, 0x4a, 0x5d, 0xfd, 0xd8, 0x0f, 0x1e, + 0x57, 0x27, 0xf6, 0xc4, 0xe6, 0x3f, 0x3f, 0x64, 0xbf, 0x04, 0x14, 0xb7, 0xa1, 0x4a, 0xe8, 0x57, + 0xe7, 0xd4, 0xf3, 0x9f, 0x53, 0x7d, 0x4c, 0x5d, 0x74, 0x07, 0xc0, 0x98, 0x9e, 0x7b, 0x3e, 0x75, + 0x47, 0xe6, 0xb8, 0xa9, 0x6d, 0x6a, 0x5b, 0x39, 0x52, 0x92, 0x90, 0xee, 0x18, 0x13, 0xa8, 0x11, + 0xea, 0x39, 0xb6, 0xe5, 0xd1, 0x2b, 0xbd, 0x80, 0xde, 0x83, 0x3c, 0x75, 0x5d, 0xdb, 0x6d, 0x66, + 0x36, 0xb5, 0xad, 0xf2, 0x93, 0x72, 0x9b, 0x4b, 0xbd, 0xc7, 0x40, 0x44, 0x60, 0xf0, 0x33, 0xc8, + 0xf3, 0x67, 0x74, 0x1f, 0x72, 0xfe, 0xcc, 0xa1, 0x9c, 0x49, 0xed, 0x49, 0x3d, 0x42, 0x3a, 0x9c, + 0x39, 0x94, 0x70, 0x24, 0x6a, 0xc2, 0xf2, 0x19, 0xf5, 0x3c, 0x7d, 0x42, 0x39, 0xcb, 0x12, 0x51, + 0x8f, 0xb8, 0x07, 0x30, 0xf4, 0x6c, 0xa9, 0x0e, 0xfa, 0x31, 0x14, 0x4e, 0xb8, 0x84, 0x9c, 0x5d, + 0xf9, 0xc9, 0x0d, 0xc1, 0x2e, 0xa6, 0x2d, 0x91, 0x24, 0x68, 0x15, 0xf2, 0x86, 0x7d, 0x6e, 0xf9, + 0x9c, 0x65, 0x95, 0x88, 0x07, 0xdc, 0x81, 0xd2, 0xd0, 0x3c, 0xa3, 0x9e, 0xaf, 0x9f, 0x39, 0xa8, + 0x05, 0x45, 0xe7, 0x64, 0xe6, 0x99, 0x86, 0x3e, 0xe5, 0x1c, 0xb3, 0x24, 0x78, 0x66, 0x32, 0x4d, + 0xed, 0x09, 0x47, 0x65, 0x38, 0x4a, 0x3d, 0xe2, 0xdf, 0x6b, 0x50, 0xe6, 0x42, 0x09, 0x9b, 0xa1, + 0xf7, 0x13, 0x52, 0xad, 0x2a, 0xa9, 0xa2, 0x36, 0x7d, 0xbb, 0x58, 0xe8, 0x03, 0x28, 0xf9, 0x4a, + 0xac, 0x66, 0x96, 0xb3, 0x91, 0xb6, 0x0a, 0xa4, 0x25, 0x21, 0x05, 0xfe, 0x56, 0x83, 0xc6, 0xb6, + 0x6d, 0xfb, 0x9e, 0xef, 0xea, 0x4e, 0x2a, 0xeb, 0xdc, 0x87, 0xbc, 0xe7, 0xdb, 0x2e, 0x95, 0x3e, + 0xac, 0xb6, 0x65, 0x9c, 0x0d, 0x18, 0x90, 0x08, 0x1c, 0xfa, 0x21, 0x14, 0x5c, 0x3a, 0x31, 0x6d, + 0x4b, 0x8a, 0x54, 0x53, 0x54, 0x84, 0x43, 0x89, 0xc4, 0xe2, 0x0e, 0xac, 0x44, 0xa4, 0x49, 0x63, + 0x16, 0xbc, 0x0b, 0x6b, 0x5d, 0x2f, 0x60, 0xe2, 0xd0, 0x71, 0x1a, 0xad, 0xf0, 0x6f, 0x60, 0x3d, + 0xc9, 0x25, 0x95, 0x93, 0x30, 0x54, 0x8e, 0x22, 0x5c, 0xb8, 0x91, 0x8a, 0x24, 0x06, 0xc3, 0x1f, + 0x43, 0xad, 0x33, 0x9d, 0xda, 0x46, 0x77, 0x37, 0x95, 0xa8, 0x3d, 0xa8, 0x07, 0xaf, 0xa7, 0x92, + 0xb1, 0x06, 0x19, 0x53, 0x48, 0x96, 0x23, 0x19, 0x73, 0x8c, 0xbf, 0x84, 0xfa, 0x3e, 0xf5, 0x85, + 0xff, 0xd2, 0x44, 0xc4, 0x4d, 0x28, 0x72, 0xaf, 0x8f, 0x02, 0xae, 0xcb, 0xfc, 0xb9, 0x3b, 0xc6, + 0x14, 0x1a, 0x21, 0xeb, 0x54, 0xc2, 0x5e, 0x25, 0xdc, 0xb0, 0x01, 0xf5, 0xfe, 0xf9, 0x35, 0x34, + 0xb8, 
0xd2, 0x21, 0x9f, 0x42, 0x23, 0x3c, 0x24, 0x55, 0xa8, 0x6e, 0xc3, 0x8d, 0x7d, 0xea, 0x77, + 0xa6, 0x53, 0xce, 0xc4, 0x4b, 0xe5, 0xfd, 0x53, 0x58, 0x8d, 0xf3, 0x48, 0x65, 0xd5, 0x1f, 0x40, + 0x81, 0x2b, 0xe5, 0x35, 0x33, 0x9b, 0xd9, 0x79, 0x8d, 0x25, 0x12, 0xff, 0x8a, 0xbb, 0x4f, 0xe6, + 0x6c, 0x1a, 0xc3, 0xde, 0x01, 0x10, 0x99, 0x3e, 0x3a, 0xa5, 0x33, 0x6e, 0xdd, 0x0a, 0x29, 0x09, + 0xc8, 0x0b, 0x3a, 0xc3, 0x7f, 0xd6, 0x60, 0x25, 0x72, 0x40, 0x2a, 0x55, 0xc2, 0xab, 0x26, 0xf3, + 0xb6, 0xab, 0x06, 0x3d, 0x80, 0xc2, 0x54, 0x70, 0x15, 0x57, 0x52, 0x45, 0xd1, 0xf5, 0x29, 0xe3, + 0x26, 0x70, 0xf8, 0xd7, 0xdc, 0xbc, 0xe2, 0xd5, 0xed, 0x59, 0xba, 0x0c, 0x45, 0xb7, 0x40, 0xea, + 0x18, 0x66, 0x44, 0x51, 0x00, 0xba, 0x63, 0xfc, 0x0c, 0x36, 0xf6, 0xa9, 0xbf, 0x23, 0x6a, 0xe2, + 0x8e, 0x6d, 0x1d, 0x9b, 0x93, 0x54, 0x81, 0xe0, 0x41, 0x73, 0x9e, 0x4f, 0x2a, 0x0b, 0x3e, 0x84, + 0x65, 0x59, 0xa2, 0xa5, 0x09, 0xeb, 0xca, 0x34, 0x92, 0x3b, 0x51, 0x78, 0xfc, 0x15, 0x6c, 0xf4, + 0xcf, 0xaf, 0x2f, 0xfc, 0xff, 0x73, 0xe4, 0x73, 0x68, 0xce, 0x1f, 0x99, 0x2a, 0xfd, 0xfe, 0xae, + 0x41, 0xe1, 0x80, 0x9e, 0x1d, 0x51, 0x17, 0x21, 0xc8, 0x59, 0xfa, 0x99, 0x68, 0x2e, 0x4a, 0x84, + 0xff, 0x66, 0x5e, 0x3b, 0xe3, 0xd8, 0x88, 0xd7, 0x04, 0xa0, 0x3b, 0x66, 0x48, 0x87, 0x52, 0x77, + 0x74, 0xee, 0x4e, 0xbd, 0x66, 0x76, 0x33, 0xbb, 0x55, 0x22, 0x45, 0x06, 0x78, 0xe5, 0x4e, 0x3d, + 0x74, 0x0f, 0xca, 0xc6, 0xd4, 0xa4, 0x96, 0x2f, 0xd0, 0x39, 0x8e, 0x06, 0x01, 0xe2, 0x04, 0x3f, + 0x82, 0xba, 0x88, 0xaf, 0x91, 0xe3, 0x9a, 0xb6, 0x6b, 0xfa, 0xb3, 0x66, 0x7e, 0x53, 0xdb, 0xca, + 0x93, 0x9a, 0x00, 0xf7, 0x25, 0x14, 0x7f, 0xca, 0xf3, 0x41, 0x08, 0x99, 0xee, 0x7e, 0xf8, 0xa7, + 0x06, 0x28, 0xca, 0x22, 0x65, 0x4e, 0x2d, 0x0b, 0xcd, 0xd5, 0xfd, 0x50, 0x11, 0xe4, 0x82, 0x2b, + 0x51, 0xc8, 0x05, 0x39, 0x15, 0x25, 0x93, 0x38, 0xf4, 0x01, 0x94, 0xa9, 0x6f, 0x8c, 0x47, 0x92, + 0x34, 0xb7, 0x80, 0x14, 0x18, 0xc1, 0x4b, 0xa1, 0x41, 0x1f, 0x4a, 0x2c, 0x25, 0x07, 0xbe, 0xee, + 0x7b, 0x68, 0x13, 0x72, 0xcc, 0xcc, 0x52, 0xea, 0x78, 0xce, 0x72, 0x0c, 0x7a, 0x0f, 0x2a, 0x63, + 0xfb, 0xb5, 0x35, 0xf2, 0xa8, 0x61, 0x5b, 0x63, 0x4f, 0x7a, 0xae, 0xcc, 0x60, 0x03, 0x01, 0xc2, + 0xdf, 0xe4, 0x60, 0x5d, 0xa4, 0xf4, 0x73, 0xaa, 0xbb, 0xfe, 0x11, 0xd5, 0xfd, 0x54, 0x51, 0xfb, + 0x9d, 0x5e, 0x35, 0xa8, 0x0d, 0xc0, 0x05, 0x67, 0x5a, 0x88, 0xa0, 0x09, 0x5a, 0xb7, 0x40, 0x7f, + 0x52, 0x62, 0x24, 0xec, 0xd1, 0x43, 0x1f, 0x41, 0xd5, 0xa1, 0xd6, 0xd8, 0xb4, 0x26, 0xf2, 0x95, + 0xbc, 0x74, 0x4d, 0x94, 0x79, 0x45, 0x92, 0x88, 0x57, 0xee, 0x43, 0xf5, 0x68, 0xe6, 0x53, 0x6f, + 0xf4, 0xda, 0x35, 0x7d, 0x9f, 0x5a, 0xcd, 0x02, 0x37, 0x4e, 0x85, 0x03, 0xbf, 0x10, 0x30, 0x76, + 0x47, 0x0b, 0x22, 0x97, 0xea, 0xe3, 0xe6, 0xb2, 0xe8, 0xd9, 0x39, 0x84, 0x50, 0x9d, 0xf5, 0xec, + 0x95, 0x53, 0x3a, 0x0b, 0x59, 0x14, 0x85, 0x7d, 0x19, 0x4c, 0x71, 0xb8, 0x05, 0x25, 0x4e, 0xc2, + 0x19, 0x94, 0x44, 0xe6, 0x30, 0x00, 0x7f, 0xff, 0x21, 0x34, 0x74, 0xc7, 0x71, 0xed, 0xdf, 0x9a, + 0x67, 0xba, 0x4f, 0x47, 0x9e, 0xf9, 0x35, 0x6d, 0x02, 0xa7, 0xa9, 0x47, 0xe0, 0x03, 0xf3, 0x6b, + 0x8a, 0xda, 0x50, 0x34, 0x2d, 0x9f, 0xba, 0x17, 0xfa, 0xb4, 0x59, 0xe1, 0x96, 0x43, 0x61, 0x2b, + 0xdb, 0x95, 0x18, 0x12, 0xd0, 0x24, 0x59, 0xb3, 0x23, 0x9b, 0xd5, 0x39, 0xd6, 0x2f, 0xe8, 0xcc, + 0xfb, 0x2c, 0x57, 0x2c, 0x37, 0x2a, 0xf8, 0x04, 0x60, 0xe7, 0x44, 0xb7, 0x26, 0x94, 0x99, 0xe7, + 0x0a, 0xb1, 0xf5, 0x14, 0xca, 0x06, 0xa7, 0x1f, 0xf1, 0x51, 0x24, 0xc3, 0x47, 0x91, 0x8d, 0xb6, + 0x9a, 0xa5, 0xd8, 0x6d, 0x24, 0xf8, 0xf1, 0x91, 0x04, 0x8c, 0xe0, 0x37, 0x7e, 0x02, 0xb5, 0xa1, + 0xab, 0x5b, 0xde, 0x31, 0x75, 
0x45, 0x58, 0xbf, 0xfb, 0x34, 0xfc, 0x21, 0xe4, 0x0f, 0xa8, 0x3b, + 0xe1, 0xdd, 0xb3, 0xaf, 0xbb, 0x13, 0xea, 0x4b, 0xe2, 0xb9, 0x38, 0x13, 0x58, 0xfc, 0x14, 0xca, + 0x03, 0x67, 0x6a, 0xca, 0x72, 0x85, 0x1e, 0x42, 0xc1, 0xb1, 0xa7, 0xa6, 0x31, 0x93, 0x33, 0xd3, + 0x8a, 0x30, 0xde, 0xce, 0x09, 0x35, 0x4e, 0xfb, 0x1c, 0x41, 0x24, 0x01, 0xfe, 0x4b, 0x16, 0x36, + 0xe6, 0x32, 0x22, 0xd5, 0x55, 0xf1, 0x51, 0x60, 0x22, 0xae, 0x9d, 0x48, 0x8c, 0x86, 0x3a, 0x59, + 0xd9, 0x5a, 0xd9, 0x86, 0xdb, 0xfd, 0x63, 0xa8, 0xfb, 0xd2, 0x36, 0xa3, 0x58, 0x9e, 0xc8, 0x93, + 0xe2, 0x86, 0x23, 0x35, 0x3f, 0x6e, 0xc8, 0x58, 0x75, 0xcd, 0xc5, 0xab, 0x2b, 0xfa, 0x19, 0x54, + 0x24, 0x92, 0x3a, 0xb6, 0x71, 0xc2, 0xaf, 0x59, 0x96, 0xd5, 0x31, 0x03, 0xee, 0x31, 0x14, 0x29, + 0xbb, 0xe1, 0x03, 0xbb, 0xa3, 0x84, 0x51, 0x85, 0x1a, 0x85, 0x05, 0x4e, 0x02, 0x41, 0xd0, 0x17, + 0x97, 0x4e, 0xfe, 0x8c, 0xb9, 0x8a, 0xa7, 0x4b, 0x30, 0xc8, 0x72, 0xef, 0x11, 0x81, 0x41, 0x3f, + 0x85, 0x8a, 0xc7, 0x9c, 0x33, 0x92, 0x57, 0x46, 0x91, 0x53, 0x4a, 0x9f, 0x44, 0xdc, 0x46, 0xca, + 0x5e, 0xf8, 0x80, 0x8f, 0xa1, 0xde, 0xf1, 0x4e, 0x25, 0xfa, 0xfb, 0xbb, 0xa2, 0xf0, 0x37, 0x1a, + 0x34, 0xc2, 0x83, 0x52, 0x8e, 0x3a, 0x55, 0x8b, 0xbe, 0x1e, 0x25, 0x3b, 0x9d, 0xb2, 0x45, 0x5f, + 0x13, 0xe5, 0x8e, 0x4d, 0xa8, 0x30, 0x1a, 0x5e, 0x3a, 0xcd, 0xb1, 0xa8, 0x9c, 0x39, 0x02, 0x16, + 0x7d, 0xcd, 0xcc, 0xd8, 0x1d, 0x7b, 0xf8, 0x4f, 0x1a, 0x20, 0x42, 0x1d, 0xdb, 0xf5, 0xd3, 0x2b, + 0x8d, 0x21, 0x37, 0xa5, 0xc7, 0xfe, 0x25, 0x2a, 0x73, 0x1c, 0x7a, 0x00, 0x79, 0xd7, 0x9c, 0x9c, + 0xf8, 0x97, 0x0c, 0xa4, 0x02, 0x89, 0x77, 0xe0, 0x46, 0x4c, 0x98, 0x54, 0x7d, 0xc6, 0xb7, 0x1a, + 0xac, 0x76, 0xbc, 0xd3, 0x6d, 0xdd, 0x37, 0x4e, 0xbe, 0x77, 0x4f, 0xb2, 0xe6, 0x43, 0xc4, 0x99, + 0x58, 0x0e, 0x64, 0xf9, 0x72, 0x00, 0x38, 0x68, 0x87, 0x2f, 0x2e, 0x7a, 0xb0, 0xcc, 0xa5, 0xe8, + 0xee, 0xce, 0xbb, 0x4c, 0x7b, 0xb7, 0xcb, 0x32, 0x73, 0x2e, 0x3b, 0x86, 0xb5, 0x84, 0x7a, 0xa9, + 0xe2, 0xe7, 0x1e, 0x64, 0x15, 0x7f, 0x36, 0x80, 0x84, 0x79, 0xd1, 0xdd, 0x25, 0x0c, 0x83, 0x1d, + 0x76, 0x47, 0x31, 0x67, 0x5c, 0xd3, 0x92, 0x5b, 0xb0, 0x2c, 0x34, 0x56, 0x87, 0x25, 0x4d, 0xa9, + 0xd0, 0xac, 0xd7, 0x9c, 0x3f, 0x31, 0x55, 0x0c, 0xfc, 0x12, 0x2a, 0xd1, 0xa2, 0xc5, 0x3a, 0x40, + 0xcf, 0xd7, 0x5d, 0x7f, 0x14, 0x2e, 0x6b, 0x84, 0xed, 0x6b, 0x1c, 0x1c, 0x6e, 0x96, 0xee, 0x43, + 0x95, 0x5a, 0xe3, 0x08, 0x99, 0xc8, 0xaa, 0x0a, 0xb5, 0xc6, 0x01, 0x11, 0xfe, 0x5b, 0x0e, 0x80, + 0x4f, 0x6a, 0xa2, 0x49, 0x8a, 0x0e, 0xe0, 0x5a, 0x6c, 0x00, 0x47, 0x2d, 0x28, 0x1a, 0xba, 0xa3, + 0x1b, 0xac, 0xe5, 0x94, 0x3d, 0xad, 0x7a, 0x46, 0xb7, 0xa1, 0xa4, 0x5f, 0xe8, 0xe6, 0x54, 0x3f, + 0x9a, 0x52, 0x1e, 0x37, 0x39, 0x12, 0x02, 0x58, 0xdd, 0x97, 0x71, 0x22, 0x02, 0x2b, 0xc7, 0x03, + 0x4b, 0x5e, 0x9a, 0x3c, 0xb2, 0xd0, 0xfb, 0x80, 0x3c, 0xd9, 0x91, 0x78, 0x96, 0xee, 0x48, 0xc2, + 0x3c, 0x27, 0x6c, 0x48, 0xcc, 0xc0, 0xd2, 0x1d, 0x41, 0xfd, 0x18, 0x56, 0x5d, 0x6a, 0x50, 0xf3, + 0x22, 0x41, 0x5f, 0xe0, 0xf4, 0x28, 0xc0, 0x85, 0x6f, 0xdc, 0x01, 0x08, 0x8d, 0xc6, 0xaf, 0xda, + 0x2a, 0x29, 0x05, 0xf6, 0x42, 0x6d, 0xb8, 0xa1, 0x3b, 0xce, 0x74, 0x96, 0xe0, 0x57, 0xe4, 0x74, + 0x2b, 0x0a, 0x15, 0xb2, 0xdb, 0x80, 0x65, 0xd3, 0x1b, 0x1d, 0x9d, 0x7b, 0x33, 0xde, 0xa4, 0x14, + 0x49, 0xc1, 0xf4, 0xb6, 0xcf, 0xbd, 0x19, 0xab, 0x28, 0xe7, 0x1e, 0x1d, 0x47, 0x7b, 0x93, 0x22, + 0x03, 0xf0, 0xa6, 0x64, 0xae, 0x87, 0x2a, 0x2f, 0xe8, 0xa1, 0x92, 0x4d, 0x52, 0x65, 0xbe, 0x49, + 0x8a, 0xb7, 0x59, 0xd5, 0x64, 0x9b, 0x15, 0xeb, 0xa1, 0x6a, 0x89, 0x1e, 0x2a, 0xda, 0x18, 0xd5, + 0xdf, 0xdd, 0x18, 0xe1, 0x29, 0xac, 0xf1, 0xf0, 0xb8, 
0x6e, 0xbb, 0x9b, 0xf7, 0x58, 0x7c, 0xc5, + 0x8b, 0x7a, 0x18, 0x77, 0x44, 0xa0, 0xf1, 0x33, 0x58, 0x4f, 0x9e, 0x96, 0x2a, 0x67, 0xfe, 0xa1, + 0xc1, 0xea, 0xc0, 0xd0, 0x7d, 0x36, 0xfe, 0xa5, 0x5f, 0x39, 0xbc, 0x6d, 0xf8, 0xbe, 0xea, 0x5e, + 0x32, 0xd2, 0xc1, 0xe7, 0xde, 0xb2, 0x2c, 0xd8, 0x83, 0xb5, 0x84, 0xbc, 0x69, 0x37, 0x98, 0xfb, + 0xd4, 0xdf, 0xdf, 0x19, 0xe8, 0xc7, 0xb4, 0x6f, 0x9b, 0x56, 0x2a, 0x6f, 0x61, 0x0a, 0xeb, 0x49, + 0x2e, 0xa9, 0xae, 0x65, 0x96, 0x74, 0xfa, 0x31, 0x1d, 0x39, 0x8c, 0x87, 0x34, 0x60, 0xc9, 0x53, + 0x4c, 0xf1, 0x31, 0x34, 0x5f, 0x39, 0x63, 0xdd, 0xa7, 0xd7, 0x94, 0xf7, 0x5d, 0xe7, 0xd8, 0x70, + 0x73, 0xc1, 0x39, 0xa9, 0x34, 0x7a, 0x00, 0x35, 0x56, 0xd1, 0xe6, 0x4e, 0x63, 0x75, 0x6e, 0x10, + 0x51, 0x6c, 0x65, 0x30, 0xb3, 0x8c, 0x6b, 0x44, 0xde, 0x03, 0x28, 0x88, 0xc1, 0x58, 0x26, 0x4c, + 0x62, 0x1a, 0x16, 0x38, 0x3c, 0x05, 0x14, 0x3d, 0x27, 0x95, 0x46, 0x57, 0xae, 0x68, 0x8f, 0x7e, + 0x07, 0xa5, 0xe0, 0x9b, 0x09, 0x2a, 0x40, 0xa6, 0xf7, 0xa2, 0xb1, 0x84, 0xca, 0xb0, 0xfc, 0xea, + 0xf0, 0xc5, 0x61, 0xef, 0x8b, 0xc3, 0x86, 0x86, 0x56, 0xa1, 0x71, 0xd8, 0x1b, 0x8e, 0xb6, 0x7b, + 0xbd, 0xe1, 0x60, 0x48, 0x3a, 0xfd, 0xfe, 0xde, 0x6e, 0x23, 0x83, 0x6e, 0x40, 0x7d, 0x30, 0xec, + 0x91, 0xbd, 0xd1, 0xb0, 0x77, 0xb0, 0x3d, 0x18, 0xf6, 0x0e, 0xf7, 0x1a, 0x59, 0xd4, 0x84, 0xd5, + 0xce, 0x4b, 0xb2, 0xd7, 0xd9, 0xfd, 0x32, 0x4e, 0x9e, 0x63, 0x98, 0xee, 0xe1, 0x4e, 0xef, 0xa0, + 0xdf, 0x19, 0x76, 0xb7, 0x5f, 0xee, 0x8d, 0x3e, 0xdf, 0x23, 0x83, 0x6e, 0xef, 0xb0, 0x91, 0x7f, + 0xb4, 0x05, 0xe5, 0xc8, 0x00, 0x82, 0x8a, 0x90, 0x1b, 0xec, 0x74, 0x0e, 0x1b, 0x4b, 0xa8, 0x0e, + 0xe5, 0x4e, 0xbf, 0x4f, 0x7a, 0xbf, 0xe8, 0x1e, 0x74, 0x86, 0x7b, 0x0d, 0xed, 0xc9, 0x1f, 0xaa, + 0x90, 0xe9, 0xef, 0xa2, 0x0e, 0x40, 0xb8, 0xbf, 0x40, 0x1b, 0xc2, 0x0e, 0x73, 0x4b, 0x91, 0x56, + 0x73, 0x1e, 0x21, 0x4c, 0x85, 0x97, 0xd0, 0x63, 0xc8, 0x0e, 0x3d, 0x1b, 0xc9, 0x0b, 0x2b, 0xfc, + 0x0c, 0xd4, 0x5a, 0x89, 0x40, 0x14, 0xf5, 0x96, 0xf6, 0x58, 0x43, 0x9f, 0x40, 0x29, 0x58, 0xfe, + 0xa3, 0x75, 0x41, 0x95, 0xfc, 0x4c, 0xd2, 0xda, 0x98, 0x83, 0x07, 0x27, 0x1e, 0x40, 0x2d, 0xfe, + 0xf9, 0x00, 0xdd, 0x12, 0xc4, 0x0b, 0x3f, 0x4d, 0xb4, 0x6e, 0x2f, 0x46, 0x06, 0xec, 0x9e, 0xc2, + 0xb2, 0x5c, 0xf1, 0x23, 0x19, 0x08, 0xf1, 0x0f, 0x06, 0xad, 0xb5, 0x04, 0x34, 0x78, 0xf3, 0xe7, + 0x50, 0x54, 0x0b, 0x77, 0xb4, 0x16, 0x98, 0x28, 0xba, 0x19, 0x6f, 0xad, 0x27, 0xc1, 0xd1, 0x97, + 0xd5, 0x86, 0x5b, 0xbd, 0x9c, 0x58, 0xab, 0xab, 0x97, 0x93, 0x8b, 0x70, 0xbc, 0x84, 0xf6, 0xa1, + 0x12, 0x5d, 0x4c, 0xa3, 0x9b, 0xc1, 0x31, 0xc9, 0x85, 0x77, 0xab, 0xb5, 0x08, 0x15, 0xb5, 0x65, + 0xbc, 0x9c, 0x28, 0x5b, 0x2e, 0x2c, 0x69, 0xca, 0x96, 0x8b, 0x2b, 0x10, 0x5e, 0x42, 0x43, 0xa8, + 0x27, 0x26, 0x5d, 0x74, 0x5b, 0x25, 0xd7, 0xa2, 0x95, 0x50, 0xeb, 0xce, 0x25, 0xd8, 0x64, 0xc0, + 0x04, 0x7b, 0x62, 0x14, 0x5a, 0x34, 0x76, 0x7b, 0xb4, 0x36, 0xe6, 0xe0, 0x81, 0x54, 0xdb, 0x50, + 0xdd, 0xa7, 0x7e, 0xdf, 0xa5, 0x17, 0xe9, 0x79, 0x3c, 0xe3, 0x3c, 0xc2, 0x5d, 0x35, 0x6a, 0x25, + 0x68, 0x23, 0x0b, 0xec, 0xb7, 0xf1, 0xf9, 0x04, 0x8a, 0x6a, 0x14, 0x54, 0x6e, 0x4f, 0xcc, 0xa0, + 0xca, 0xed, 0xc9, 0x89, 0x11, 0x67, 0xff, 0x98, 0xd1, 0xd0, 0x3e, 0x94, 0x23, 0x43, 0x13, 0x6a, + 0x2a, 0xfb, 0x25, 0x87, 0xba, 0xd6, 0xcd, 0x05, 0x98, 0x28, 0xa3, 0xcf, 0xa0, 0x1a, 0x1b, 0x2c, + 0x94, 0x42, 0x8b, 0x86, 0xa9, 0xd6, 0xad, 0x85, 0xb8, 0x40, 0xa9, 0x01, 0x34, 0x92, 0xad, 0x3c, + 0xba, 0x13, 0x3d, 0x7f, 0x9e, 0xe3, 0xdd, 0xcb, 0xd0, 0x51, 0xa6, 0xc9, 0x9d, 0xbb, 0x62, 0x7a, + 0xc9, 0x4e, 0x5f, 0x31, 0xbd, 0x6c, 0x55, 0x2f, 0x98, 0x26, 0x17, 0xdc, 0x8a, 
0xe9, 0x25, 0xbb, + 0x76, 0xc5, 0xf4, 0xb2, 0xbd, 0x38, 0x5e, 0x62, 0xa6, 0x8c, 0xb5, 0x26, 0xca, 0x94, 0x8b, 0xfa, + 0x2b, 0x65, 0xca, 0x85, 0xbd, 0x8c, 0x48, 0xc8, 0x78, 0x67, 0xa1, 0x12, 0x72, 0x61, 0xd7, 0xa2, + 0x12, 0x72, 0x71, 0x33, 0x82, 0x97, 0xd0, 0xe7, 0xb0, 0x32, 0x57, 0xd9, 0x91, 0xd4, 0xe8, 0xb2, + 0xd6, 0xa2, 0x75, 0xef, 0x52, 0x7c, 0x24, 0x1d, 0xca, 0x61, 0x61, 0x0d, 0x2a, 0xc7, 0x5c, 0x4d, + 0x57, 0x95, 0x63, 0xbe, 0x08, 0x8b, 0xd4, 0xde, 0x7e, 0xf4, 0xaf, 0x37, 0x77, 0xb5, 0x7f, 0xbf, + 0xb9, 0xab, 0xfd, 0xe7, 0xcd, 0x5d, 0xed, 0xaf, 0xff, 0xbd, 0xbb, 0x04, 0x4d, 0xc3, 0x3e, 0x6b, + 0x3b, 0xa6, 0x35, 0x31, 0x74, 0xa7, 0xed, 0x9b, 0xa7, 0x17, 0xed, 0xd3, 0x0b, 0xfe, 0x1f, 0x13, + 0x47, 0x05, 0xfe, 0xe7, 0x27, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xe2, 0x03, 0x4f, 0xc1, 0x7f, + 0x21, 0x00, 0x00, } diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden b/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden deleted file mode 100644 index d6456956733..00000000000 --- a/vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/syndtr/goleveldb/LICENSE b/vendor/github.com/syndtr/goleveldb/LICENSE new file mode 100644 index 00000000000..4a772d1ab36 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/LICENSE @@ -0,0 +1,24 @@ +Copyright 2012 Suryandaru Triandana +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/batch.go b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go new file mode 100644 index 00000000000..225920002df --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go @@ -0,0 +1,349 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +// ErrBatchCorrupted records reason of batch corruption. This error will be +// wrapped with errors.ErrCorrupted. +type ErrBatchCorrupted struct { + Reason string +} + +func (e *ErrBatchCorrupted) Error() string { + return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason) +} + +func newErrBatchCorrupted(reason string) error { + return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason}) +} + +const ( + batchHeaderLen = 8 + 4 + batchGrowRec = 3000 + batchBufioSize = 16 +) + +// BatchReplay wraps basic batch operations. +type BatchReplay interface { + Put(key, value []byte) + Delete(key []byte) +} + +type batchIndex struct { + keyType keyType + keyPos, keyLen int + valuePos, valueLen int +} + +func (index batchIndex) k(data []byte) []byte { + return data[index.keyPos : index.keyPos+index.keyLen] +} + +func (index batchIndex) v(data []byte) []byte { + if index.valueLen != 0 { + return data[index.valuePos : index.valuePos+index.valueLen] + } + return nil +} + +func (index batchIndex) kv(data []byte) (key, value []byte) { + return index.k(data), index.v(data) +} + +// Batch is a write batch. +type Batch struct { + data []byte + index []batchIndex + + // internalLen is sums of key/value pair length plus 8-bytes internal key. 
+ internalLen int +} + +func (b *Batch) grow(n int) { + o := len(b.data) + if cap(b.data)-o < n { + div := 1 + if len(b.index) > batchGrowRec { + div = len(b.index) / batchGrowRec + } + ndata := make([]byte, o, o+n+o/div) + copy(ndata, b.data) + b.data = ndata + } +} + +func (b *Batch) appendRec(kt keyType, key, value []byte) { + n := 1 + binary.MaxVarintLen32 + len(key) + if kt == keyTypeVal { + n += binary.MaxVarintLen32 + len(value) + } + b.grow(n) + index := batchIndex{keyType: kt} + o := len(b.data) + data := b.data[:o+n] + data[o] = byte(kt) + o++ + o += binary.PutUvarint(data[o:], uint64(len(key))) + index.keyPos = o + index.keyLen = len(key) + o += copy(data[o:], key) + if kt == keyTypeVal { + o += binary.PutUvarint(data[o:], uint64(len(value))) + index.valuePos = o + index.valueLen = len(value) + o += copy(data[o:], value) + } + b.data = data[:o] + b.index = append(b.index, index) + b.internalLen += index.keyLen + index.valueLen + 8 +} + +// Put appends 'put operation' of the given key/value pair to the batch. +// It is safe to modify the contents of the argument after Put returns but not +// before. +func (b *Batch) Put(key, value []byte) { + b.appendRec(keyTypeVal, key, value) +} + +// Delete appends 'delete operation' of the given key to the batch. +// It is safe to modify the contents of the argument after Delete returns but +// not before. +func (b *Batch) Delete(key []byte) { + b.appendRec(keyTypeDel, key, nil) +} + +// Dump dumps batch contents. The returned slice can be loaded into the +// batch using Load method. +// The returned slice is not its own copy, so the contents should not be +// modified. +func (b *Batch) Dump() []byte { + return b.data +} + +// Load loads given slice into the batch. Previous contents of the batch +// will be discarded. +// The given slice will not be copied and will be used as batch buffer, so +// it is not safe to modify the contents of the slice. +func (b *Batch) Load(data []byte) error { + return b.decode(data, -1) +} + +// Replay replays batch contents. +func (b *Batch) Replay(r BatchReplay) error { + for _, index := range b.index { + switch index.keyType { + case keyTypeVal: + r.Put(index.k(b.data), index.v(b.data)) + case keyTypeDel: + r.Delete(index.k(b.data)) + } + } + return nil +} + +// Len returns number of records in the batch. +func (b *Batch) Len() int { + return len(b.index) +} + +// Reset resets the batch. +func (b *Batch) Reset() { + b.data = b.data[:0] + b.index = b.index[:0] + b.internalLen = 0 +} + +func (b *Batch) replayInternal(fn func(i int, kt keyType, k, v []byte) error) error { + for i, index := range b.index { + if err := fn(i, index.keyType, index.k(b.data), index.v(b.data)); err != nil { + return err + } + } + return nil +} + +func (b *Batch) append(p *Batch) { + ob := len(b.data) + oi := len(b.index) + b.data = append(b.data, p.data...) + b.index = append(b.index, p.index...) + b.internalLen += p.internalLen + + // Updating index offset. 
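+	// Entries copied from p carry offsets relative to p.data; after appending
+	// them behind b's existing ob bytes, every key/value position must be
+	// shifted by ob. When b was empty the offsets are already correct.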
+ if ob != 0 { + for ; oi < len(b.index); oi++ { + index := &b.index[oi] + index.keyPos += ob + if index.valueLen != 0 { + index.valuePos += ob + } + } + } +} + +func (b *Batch) decode(data []byte, expectedLen int) error { + b.data = data + b.index = b.index[:0] + b.internalLen = 0 + err := decodeBatch(data, func(i int, index batchIndex) error { + b.index = append(b.index, index) + b.internalLen += index.keyLen + index.valueLen + 8 + return nil + }) + if err != nil { + return err + } + if expectedLen >= 0 && len(b.index) != expectedLen { + return newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", expectedLen, len(b.index))) + } + return nil +} + +func (b *Batch) putMem(seq uint64, mdb *memdb.DB) error { + var ik []byte + for i, index := range b.index { + ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType) + if err := mdb.Put(ik, index.v(b.data)); err != nil { + return err + } + } + return nil +} + +func (b *Batch) revertMem(seq uint64, mdb *memdb.DB) error { + var ik []byte + for i, index := range b.index { + ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType) + if err := mdb.Delete(ik); err != nil { + return err + } + } + return nil +} + +func newBatch() interface{} { + return &Batch{} +} + +func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error { + var index batchIndex + for i, o := 0, 0; o < len(data); i++ { + // Key type. + index.keyType = keyType(data[o]) + if index.keyType > keyTypeVal { + return newErrBatchCorrupted(fmt.Sprintf("bad record: invalid type %#x", uint(index.keyType))) + } + o++ + + // Key. + x, n := binary.Uvarint(data[o:]) + o += n + if n <= 0 || o+int(x) > len(data) { + return newErrBatchCorrupted("bad record: invalid key length") + } + index.keyPos = o + index.keyLen = int(x) + o += index.keyLen + + // Value. 
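+		// Only put records (keyTypeVal) carry a value length and payload;
+		// delete records end after the key, so the value fields stay zero.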
+ if index.keyType == keyTypeVal { + x, n = binary.Uvarint(data[o:]) + o += n + if n <= 0 || o+int(x) > len(data) { + return newErrBatchCorrupted("bad record: invalid value length") + } + index.valuePos = o + index.valueLen = int(x) + o += index.valueLen + } else { + index.valuePos = 0 + index.valueLen = 0 + } + + if err := fn(i, index); err != nil { + return err + } + } + return nil +} + +func decodeBatchToMem(data []byte, expectSeq uint64, mdb *memdb.DB) (seq uint64, batchLen int, err error) { + seq, batchLen, err = decodeBatchHeader(data) + if err != nil { + return 0, 0, err + } + if seq < expectSeq { + return 0, 0, newErrBatchCorrupted("invalid sequence number") + } + data = data[batchHeaderLen:] + var ik []byte + var decodedLen int + err = decodeBatch(data, func(i int, index batchIndex) error { + if i >= batchLen { + return newErrBatchCorrupted("invalid records length") + } + ik = makeInternalKey(ik, index.k(data), seq+uint64(i), index.keyType) + if err := mdb.Put(ik, index.v(data)); err != nil { + return err + } + decodedLen++ + return nil + }) + if err == nil && decodedLen != batchLen { + err = newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", batchLen, decodedLen)) + } + return +} + +func encodeBatchHeader(dst []byte, seq uint64, batchLen int) []byte { + dst = ensureBuffer(dst, batchHeaderLen) + binary.LittleEndian.PutUint64(dst, seq) + binary.LittleEndian.PutUint32(dst[8:], uint32(batchLen)) + return dst +} + +func decodeBatchHeader(data []byte) (seq uint64, batchLen int, err error) { + if len(data) < batchHeaderLen { + return 0, 0, newErrBatchCorrupted("too short") + } + + seq = binary.LittleEndian.Uint64(data) + batchLen = int(binary.LittleEndian.Uint32(data[8:])) + if batchLen < 0 { + return 0, 0, newErrBatchCorrupted("invalid records length") + } + return +} + +func batchesLen(batches []*Batch) int { + batchLen := 0 + for _, batch := range batches { + batchLen += batch.Len() + } + return batchLen +} + +func writeBatchesWithHeader(wr io.Writer, batches []*Batch, seq uint64) error { + if _, err := wr.Write(encodeBatchHeader(nil, seq, batchesLen(batches))); err != nil { + return err + } + for _, batch := range batches { + if _, err := wr.Write(batch.data); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go new file mode 100644 index 00000000000..c5940b232cd --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go @@ -0,0 +1,705 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package cache provides interface and implementation of a cache algorithms. +package cache + +import ( + "sync" + "sync/atomic" + "unsafe" + + "github.com/syndtr/goleveldb/leveldb/util" +) + +// Cacher provides interface to implements a caching functionality. +// An implementation must be safe for concurrent use. +type Cacher interface { + // Capacity returns cache capacity. + Capacity() int + + // SetCapacity sets cache capacity. + SetCapacity(capacity int) + + // Promote promotes the 'cache node'. + Promote(n *Node) + + // Ban evicts the 'cache node' and prevent subsequent 'promote'. + Ban(n *Node) + + // Evict evicts the 'cache node'. + Evict(n *Node) + + // EvictNS evicts 'cache node' with the given namespace. + EvictNS(ns uint64) + + // EvictAll evicts all 'cache node'. 
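+	// Cache.CloseWeak relies on this to drop whatever the cacher still
+	// tracks without forcefully releasing outstanding handles.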
+ EvictAll() + + // Close closes the 'cache tree' + Close() error +} + +// Value is a 'cacheable object'. It may implements util.Releaser, if +// so the the Release method will be called once object is released. +type Value interface{} + +// NamespaceGetter provides convenient wrapper for namespace. +type NamespaceGetter struct { + Cache *Cache + NS uint64 +} + +// Get simply calls Cache.Get() method. +func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle { + return g.Cache.Get(g.NS, key, setFunc) +} + +// The hash tables implementation is based on: +// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, +// Kunlong Zhang, and Michael Spear. +// ACM Symposium on Principles of Distributed Computing, Jul 2014. + +const ( + mInitialSize = 1 << 4 + mOverflowThreshold = 1 << 5 + mOverflowGrowThreshold = 1 << 7 +) + +type mBucket struct { + mu sync.Mutex + node []*Node + frozen bool +} + +func (b *mBucket) freeze() []*Node { + b.mu.Lock() + defer b.mu.Unlock() + if !b.frozen { + b.frozen = true + } + return b.node +} + +func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) { + b.mu.Lock() + + if b.frozen { + b.mu.Unlock() + return + } + + // Scan the node. + for _, n := range b.node { + if n.hash == hash && n.ns == ns && n.key == key { + atomic.AddInt32(&n.ref, 1) + b.mu.Unlock() + return true, false, n + } + } + + // Get only. + if noset { + b.mu.Unlock() + return true, false, nil + } + + // Create node. + n = &Node{ + r: r, + hash: hash, + ns: ns, + key: key, + ref: 1, + } + // Add node to bucket. + b.node = append(b.node, n) + bLen := len(b.node) + b.mu.Unlock() + + // Update counter. + grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold + if bLen > mOverflowThreshold { + grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold + } + + // Grow. + if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { + nhLen := len(h.buckets) << 1 + nh := &mNode{ + buckets: make([]unsafe.Pointer, nhLen), + mask: uint32(nhLen) - 1, + pred: unsafe.Pointer(h), + growThreshold: int32(nhLen * mOverflowThreshold), + shrinkThreshold: int32(nhLen >> 1), + } + ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) + if !ok { + panic("BUG: failed swapping head") + } + go nh.initBuckets() + } + + return true, true, n +} + +func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) { + b.mu.Lock() + + if b.frozen { + b.mu.Unlock() + return + } + + // Scan the node. + var ( + n *Node + bLen int + ) + for i := range b.node { + n = b.node[i] + if n.ns == ns && n.key == key { + if atomic.LoadInt32(&n.ref) == 0 { + deleted = true + + // Call releaser. + if n.value != nil { + if r, ok := n.value.(util.Releaser); ok { + r.Release() + } + n.value = nil + } + + // Remove node from bucket. + b.node = append(b.node[:i], b.node[i+1:]...) + bLen = len(b.node) + } + break + } + } + b.mu.Unlock() + + if deleted { + // Call OnDel. + for _, f := range n.onDel { + f() + } + + // Update counter. + atomic.AddInt32(&r.size, int32(n.size)*-1) + shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold + if bLen >= mOverflowThreshold { + atomic.AddInt32(&h.overflow, -1) + } + + // Shrink. 
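+		// Mirror of the grow path in get: halve the bucket array once the node
+		// count drops below shrinkThreshold (but never below mInitialSize),
+		// allow only one resize at a time via resizeInProgess, and let
+		// initBuckets populate the new buckets lazily.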
+ if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { + nhLen := len(h.buckets) >> 1 + nh := &mNode{ + buckets: make([]unsafe.Pointer, nhLen), + mask: uint32(nhLen) - 1, + pred: unsafe.Pointer(h), + growThreshold: int32(nhLen * mOverflowThreshold), + shrinkThreshold: int32(nhLen >> 1), + } + ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) + if !ok { + panic("BUG: failed swapping head") + } + go nh.initBuckets() + } + } + + return true, deleted +} + +type mNode struct { + buckets []unsafe.Pointer // []*mBucket + mask uint32 + pred unsafe.Pointer // *mNode + resizeInProgess int32 + + overflow int32 + growThreshold int32 + shrinkThreshold int32 +} + +func (n *mNode) initBucket(i uint32) *mBucket { + if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil { + return b + } + + p := (*mNode)(atomic.LoadPointer(&n.pred)) + if p != nil { + var node []*Node + if n.mask > p.mask { + // Grow. + pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask])) + if pb == nil { + pb = p.initBucket(i & p.mask) + } + m := pb.freeze() + // Split nodes. + for _, x := range m { + if x.hash&n.mask == i { + node = append(node, x) + } + } + } else { + // Shrink. + pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i])) + if pb0 == nil { + pb0 = p.initBucket(i) + } + pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))])) + if pb1 == nil { + pb1 = p.initBucket(i + uint32(len(n.buckets))) + } + m0 := pb0.freeze() + m1 := pb1.freeze() + // Merge nodes. + node = make([]*Node, 0, len(m0)+len(m1)) + node = append(node, m0...) + node = append(node, m1...) + } + b := &mBucket{node: node} + if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) { + if len(node) > mOverflowThreshold { + atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold)) + } + return b + } + } + + return (*mBucket)(atomic.LoadPointer(&n.buckets[i])) +} + +func (n *mNode) initBuckets() { + for i := range n.buckets { + n.initBucket(uint32(i)) + } + atomic.StorePointer(&n.pred, nil) +} + +// Cache is a 'cache map'. +type Cache struct { + mu sync.RWMutex + mHead unsafe.Pointer // *mNode + nodes int32 + size int32 + cacher Cacher + closed bool +} + +// NewCache creates a new 'cache map'. The cacher is optional and +// may be nil. +func NewCache(cacher Cacher) *Cache { + h := &mNode{ + buckets: make([]unsafe.Pointer, mInitialSize), + mask: mInitialSize - 1, + growThreshold: int32(mInitialSize * mOverflowThreshold), + shrinkThreshold: 0, + } + for i := range h.buckets { + h.buckets[i] = unsafe.Pointer(&mBucket{}) + } + r := &Cache{ + mHead: unsafe.Pointer(h), + cacher: cacher, + } + return r +} + +func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) { + h := (*mNode)(atomic.LoadPointer(&r.mHead)) + i := hash & h.mask + b := (*mBucket)(atomic.LoadPointer(&h.buckets[i])) + if b == nil { + b = h.initBucket(i) + } + return h, b +} + +func (r *Cache) delete(n *Node) bool { + for { + h, b := r.getBucket(n.hash) + done, deleted := b.delete(r, h, n.hash, n.ns, n.key) + if done { + return deleted + } + } + return false +} + +// Nodes returns number of 'cache node' in the map. +func (r *Cache) Nodes() int { + return int(atomic.LoadInt32(&r.nodes)) +} + +// Size returns sums of 'cache node' size in the map. +func (r *Cache) Size() int { + return int(atomic.LoadInt32(&r.size)) +} + +// Capacity returns cache capacity. 
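+// It delegates to the underlying Cacher; a Cache built without a Cacher
+// reports zero.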
+func (r *Cache) Capacity() int { + if r.cacher == nil { + return 0 + } + return r.cacher.Capacity() +} + +// SetCapacity sets cache capacity. +func (r *Cache) SetCapacity(capacity int) { + if r.cacher != nil { + r.cacher.SetCapacity(capacity) + } +} + +// Get gets 'cache node' with the given namespace and key. +// If cache node is not found and setFunc is not nil, Get will atomically creates +// the 'cache node' by calling setFunc. Otherwise Get will returns nil. +// +// The returned 'cache handle' should be released after use by calling Release +// method. +func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle { + r.mu.RLock() + defer r.mu.RUnlock() + if r.closed { + return nil + } + + hash := murmur32(ns, key, 0xf00) + for { + h, b := r.getBucket(hash) + done, _, n := b.get(r, h, hash, ns, key, setFunc == nil) + if done { + if n != nil { + n.mu.Lock() + if n.value == nil { + if setFunc == nil { + n.mu.Unlock() + n.unref() + return nil + } + + n.size, n.value = setFunc() + if n.value == nil { + n.size = 0 + n.mu.Unlock() + n.unref() + return nil + } + atomic.AddInt32(&r.size, int32(n.size)) + } + n.mu.Unlock() + if r.cacher != nil { + r.cacher.Promote(n) + } + return &Handle{unsafe.Pointer(n)} + } + + break + } + } + return nil +} + +// Delete removes and ban 'cache node' with the given namespace and key. +// A banned 'cache node' will never inserted into the 'cache tree'. Ban +// only attributed to the particular 'cache node', so when a 'cache node' +// is recreated it will not be banned. +// +// If onDel is not nil, then it will be executed if such 'cache node' +// doesn't exist or once the 'cache node' is released. +// +// Delete return true is such 'cache node' exist. +func (r *Cache) Delete(ns, key uint64, onDel func()) bool { + r.mu.RLock() + defer r.mu.RUnlock() + if r.closed { + return false + } + + hash := murmur32(ns, key, 0xf00) + for { + h, b := r.getBucket(hash) + done, _, n := b.get(r, h, hash, ns, key, true) + if done { + if n != nil { + if onDel != nil { + n.mu.Lock() + n.onDel = append(n.onDel, onDel) + n.mu.Unlock() + } + if r.cacher != nil { + r.cacher.Ban(n) + } + n.unref() + return true + } + + break + } + } + + if onDel != nil { + onDel() + } + + return false +} + +// Evict evicts 'cache node' with the given namespace and key. This will +// simply call Cacher.Evict. +// +// Evict return true is such 'cache node' exist. +func (r *Cache) Evict(ns, key uint64) bool { + r.mu.RLock() + defer r.mu.RUnlock() + if r.closed { + return false + } + + hash := murmur32(ns, key, 0xf00) + for { + h, b := r.getBucket(hash) + done, _, n := b.get(r, h, hash, ns, key, true) + if done { + if n != nil { + if r.cacher != nil { + r.cacher.Evict(n) + } + n.unref() + return true + } + + break + } + } + + return false +} + +// EvictNS evicts 'cache node' with the given namespace. This will +// simply call Cacher.EvictNS. +func (r *Cache) EvictNS(ns uint64) { + r.mu.RLock() + defer r.mu.RUnlock() + if r.closed { + return + } + + if r.cacher != nil { + r.cacher.EvictNS(ns) + } +} + +// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll. +func (r *Cache) EvictAll() { + r.mu.RLock() + defer r.mu.RUnlock() + if r.closed { + return + } + + if r.cacher != nil { + r.cacher.EvictAll() + } +} + +// Close closes the 'cache map' and forcefully releases all 'cache node'. 
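+//
+// A minimal lifecycle sketch, assuming a cache built with
+// NewCache(NewLRU(capacity)) and caller-supplied data in blob:
+//
+//	h := c.Get(ns, key, func() (int, Value) { return len(blob), blob })
+//	if h != nil {
+//		_ = h.Value()
+//		h.Release()
+//	}
+//	c.Close()
+//
+// Close forcefully runs releasers and OnDel callbacks for every remaining
+// node; CloseWeak below is the non-forceful variant.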
+func (r *Cache) Close() error { + r.mu.Lock() + if !r.closed { + r.closed = true + + h := (*mNode)(r.mHead) + h.initBuckets() + + for i := range h.buckets { + b := (*mBucket)(h.buckets[i]) + for _, n := range b.node { + // Call releaser. + if n.value != nil { + if r, ok := n.value.(util.Releaser); ok { + r.Release() + } + n.value = nil + } + + // Call OnDel. + for _, f := range n.onDel { + f() + } + n.onDel = nil + } + } + } + r.mu.Unlock() + + // Avoid deadlock. + if r.cacher != nil { + if err := r.cacher.Close(); err != nil { + return err + } + } + return nil +} + +// CloseWeak closes the 'cache map' and evict all 'cache node' from cacher, but +// unlike Close it doesn't forcefully releases 'cache node'. +func (r *Cache) CloseWeak() error { + r.mu.Lock() + if !r.closed { + r.closed = true + } + r.mu.Unlock() + + // Avoid deadlock. + if r.cacher != nil { + r.cacher.EvictAll() + if err := r.cacher.Close(); err != nil { + return err + } + } + return nil +} + +// Node is a 'cache node'. +type Node struct { + r *Cache + + hash uint32 + ns, key uint64 + + mu sync.Mutex + size int + value Value + + ref int32 + onDel []func() + + CacheData unsafe.Pointer +} + +// NS returns this 'cache node' namespace. +func (n *Node) NS() uint64 { + return n.ns +} + +// Key returns this 'cache node' key. +func (n *Node) Key() uint64 { + return n.key +} + +// Size returns this 'cache node' size. +func (n *Node) Size() int { + return n.size +} + +// Value returns this 'cache node' value. +func (n *Node) Value() Value { + return n.value +} + +// Ref returns this 'cache node' ref counter. +func (n *Node) Ref() int32 { + return atomic.LoadInt32(&n.ref) +} + +// GetHandle returns an handle for this 'cache node'. +func (n *Node) GetHandle() *Handle { + if atomic.AddInt32(&n.ref, 1) <= 1 { + panic("BUG: Node.GetHandle on zero ref") + } + return &Handle{unsafe.Pointer(n)} +} + +func (n *Node) unref() { + if atomic.AddInt32(&n.ref, -1) == 0 { + n.r.delete(n) + } +} + +func (n *Node) unrefLocked() { + if atomic.AddInt32(&n.ref, -1) == 0 { + n.r.mu.RLock() + if !n.r.closed { + n.r.delete(n) + } + n.r.mu.RUnlock() + } +} + +// Handle is a 'cache handle' of a 'cache node'. +type Handle struct { + n unsafe.Pointer // *Node +} + +// Value returns the value of the 'cache node'. +func (h *Handle) Value() Value { + n := (*Node)(atomic.LoadPointer(&h.n)) + if n != nil { + return n.value + } + return nil +} + +// Release releases this 'cache handle'. +// It is safe to call release multiple times. +func (h *Handle) Release() { + nPtr := atomic.LoadPointer(&h.n) + if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) { + n := (*Node)(nPtr) + n.unrefLocked() + } +} + +func murmur32(ns, key uint64, seed uint32) uint32 { + const ( + m = uint32(0x5bd1e995) + r = 24 + ) + + k1 := uint32(ns >> 32) + k2 := uint32(ns) + k3 := uint32(key >> 32) + k4 := uint32(key) + + k1 *= m + k1 ^= k1 >> r + k1 *= m + + k2 *= m + k2 ^= k2 >> r + k2 *= m + + k3 *= m + k3 ^= k3 >> r + k3 *= m + + k4 *= m + k4 ^= k4 >> r + k4 *= m + + h := seed + + h *= m + h ^= k1 + h *= m + h ^= k2 + h *= m + h ^= k3 + h *= m + h ^= k4 + + h ^= h >> 13 + h *= m + h ^= h >> 15 + + return h +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go new file mode 100644 index 00000000000..d9a84cde15e --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go @@ -0,0 +1,195 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. 
+// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package cache + +import ( + "sync" + "unsafe" +) + +type lruNode struct { + n *Node + h *Handle + ban bool + + next, prev *lruNode +} + +func (n *lruNode) insert(at *lruNode) { + x := at.next + at.next = n + n.prev = at + n.next = x + x.prev = n +} + +func (n *lruNode) remove() { + if n.prev != nil { + n.prev.next = n.next + n.next.prev = n.prev + n.prev = nil + n.next = nil + } else { + panic("BUG: removing removed node") + } +} + +type lru struct { + mu sync.Mutex + capacity int + used int + recent lruNode +} + +func (r *lru) reset() { + r.recent.next = &r.recent + r.recent.prev = &r.recent + r.used = 0 +} + +func (r *lru) Capacity() int { + r.mu.Lock() + defer r.mu.Unlock() + return r.capacity +} + +func (r *lru) SetCapacity(capacity int) { + var evicted []*lruNode + + r.mu.Lock() + r.capacity = capacity + for r.used > r.capacity { + rn := r.recent.prev + if rn == nil { + panic("BUG: invalid LRU used or capacity counter") + } + rn.remove() + rn.n.CacheData = nil + r.used -= rn.n.Size() + evicted = append(evicted, rn) + } + r.mu.Unlock() + + for _, rn := range evicted { + rn.h.Release() + } +} + +func (r *lru) Promote(n *Node) { + var evicted []*lruNode + + r.mu.Lock() + if n.CacheData == nil { + if n.Size() <= r.capacity { + rn := &lruNode{n: n, h: n.GetHandle()} + rn.insert(&r.recent) + n.CacheData = unsafe.Pointer(rn) + r.used += n.Size() + + for r.used > r.capacity { + rn := r.recent.prev + if rn == nil { + panic("BUG: invalid LRU used or capacity counter") + } + rn.remove() + rn.n.CacheData = nil + r.used -= rn.n.Size() + evicted = append(evicted, rn) + } + } + } else { + rn := (*lruNode)(n.CacheData) + if !rn.ban { + rn.remove() + rn.insert(&r.recent) + } + } + r.mu.Unlock() + + for _, rn := range evicted { + rn.h.Release() + } +} + +func (r *lru) Ban(n *Node) { + r.mu.Lock() + if n.CacheData == nil { + n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true}) + } else { + rn := (*lruNode)(n.CacheData) + if !rn.ban { + rn.remove() + rn.ban = true + r.used -= rn.n.Size() + r.mu.Unlock() + + rn.h.Release() + rn.h = nil + return + } + } + r.mu.Unlock() +} + +func (r *lru) Evict(n *Node) { + r.mu.Lock() + rn := (*lruNode)(n.CacheData) + if rn == nil || rn.ban { + r.mu.Unlock() + return + } + n.CacheData = nil + r.mu.Unlock() + + rn.h.Release() +} + +func (r *lru) EvictNS(ns uint64) { + var evicted []*lruNode + + r.mu.Lock() + for e := r.recent.prev; e != &r.recent; { + rn := e + e = e.prev + if rn.n.NS() == ns { + rn.remove() + rn.n.CacheData = nil + r.used -= rn.n.Size() + evicted = append(evicted, rn) + } + } + r.mu.Unlock() + + for _, rn := range evicted { + rn.h.Release() + } +} + +func (r *lru) EvictAll() { + r.mu.Lock() + back := r.recent.prev + for rn := back; rn != &r.recent; rn = rn.prev { + rn.n.CacheData = nil + } + r.reset() + r.mu.Unlock() + + for rn := back; rn != &r.recent; rn = rn.prev { + rn.h.Release() + } +} + +func (r *lru) Close() error { + return nil +} + +// NewLRU create a new LRU-cache. +func NewLRU(capacity int) Cacher { + r := &lru{capacity: capacity} + r.reset() + return r +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go new file mode 100644 index 00000000000..448402b8269 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go @@ -0,0 +1,67 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. 
+// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/comparer" +) + +type iComparer struct { + ucmp comparer.Comparer +} + +func (icmp *iComparer) uName() string { + return icmp.ucmp.Name() +} + +func (icmp *iComparer) uCompare(a, b []byte) int { + return icmp.ucmp.Compare(a, b) +} + +func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte { + return icmp.ucmp.Separator(dst, a, b) +} + +func (icmp *iComparer) uSuccessor(dst, b []byte) []byte { + return icmp.ucmp.Successor(dst, b) +} + +func (icmp *iComparer) Name() string { + return icmp.uName() +} + +func (icmp *iComparer) Compare(a, b []byte) int { + x := icmp.uCompare(internalKey(a).ukey(), internalKey(b).ukey()) + if x == 0 { + if m, n := internalKey(a).num(), internalKey(b).num(); m > n { + return -1 + } else if m < n { + return 1 + } + } + return x +} + +func (icmp *iComparer) Separator(dst, a, b []byte) []byte { + ua, ub := internalKey(a).ukey(), internalKey(b).ukey() + dst = icmp.uSeparator(dst, ua, ub) + if dst != nil && len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 { + // Append earliest possible number. + return append(dst, keyMaxNumBytes...) + } + return nil +} + +func (icmp *iComparer) Successor(dst, b []byte) []byte { + ub := internalKey(b).ukey() + dst = icmp.uSuccessor(dst, ub) + if dst != nil && len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 { + // Append earliest possible number. + return append(dst, keyMaxNumBytes...) + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go new file mode 100644 index 00000000000..14dddf88dd2 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go @@ -0,0 +1,51 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package comparer + +import "bytes" + +type bytesComparer struct{} + +func (bytesComparer) Compare(a, b []byte) int { + return bytes.Compare(a, b) +} + +func (bytesComparer) Name() string { + return "leveldb.BytewiseComparator" +} + +func (bytesComparer) Separator(dst, a, b []byte) []byte { + i, n := 0, len(a) + if n > len(b) { + n = len(b) + } + for ; i < n && a[i] == b[i]; i++ { + } + if i >= n { + // Do not shorten if one string is a prefix of the other + } else if c := a[i]; c < 0xff && c+1 < b[i] { + dst = append(dst, a[:i+1]...) + dst[i]++ + return dst + } + return nil +} + +func (bytesComparer) Successor(dst, b []byte) []byte { + for i, c := range b { + if c != 0xff { + dst = append(dst, b[:i+1]...) + dst[i]++ + return dst + } + } + return nil +} + +// DefaultComparer are default implementation of the Comparer interface. +// It uses the natural ordering, consistent with bytes.Compare. +var DefaultComparer = bytesComparer{} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go new file mode 100644 index 00000000000..14a28f16fce --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go @@ -0,0 +1,57 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +// Package comparer provides interface and implementation for ordering +// sets of data. +package comparer + +// BasicComparer is the interface that wraps the basic Compare method. +type BasicComparer interface { + // Compare returns -1, 0, or +1 depending on whether a is 'less than', + // 'equal to' or 'greater than' b. The two arguments can only be 'equal' + // if their contents are exactly equal. Furthermore, the empty slice + // must be 'less than' any non-empty slice. + Compare(a, b []byte) int +} + +// Comparer defines a total ordering over the space of []byte keys: a 'less +// than' relationship. +type Comparer interface { + BasicComparer + + // Name returns name of the comparer. + // + // The Level-DB on-disk format stores the comparer name, and opening a + // database with a different comparer from the one it was created with + // will result in an error. + // + // An implementation to a new name whenever the comparer implementation + // changes in a way that will cause the relative ordering of any two keys + // to change. + // + // Names starting with "leveldb." are reserved and should not be used + // by any users of this package. + Name() string + + // Bellow are advanced functions used used to reduce the space requirements + // for internal data structures such as index blocks. + + // Separator appends a sequence of bytes x to dst such that a <= x && x < b, + // where 'less than' is consistent with Compare. An implementation should + // return nil if x equal to a. + // + // Either contents of a or b should not by any means modified. Doing so + // may cause corruption on the internal state. + Separator(dst, a, b []byte) []byte + + // Successor appends a sequence of bytes x to dst such that x >= b, where + // 'less than' is consistent with Compare. An implementation should return + // nil if x equal to b. + // + // Contents of b should not by any means modified. Doing so may cause + // corruption on the internal state. + Successor(dst, b []byte) []byte +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/db.go new file mode 100644 index 00000000000..e7ac0654187 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db.go @@ -0,0 +1,1175 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "container/list" + "fmt" + "io" + "os" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/journal" + "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/table" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// DB is a LevelDB database. +type DB struct { + // Need 64-bit alignment. + seq uint64 + + // Stats. Need 64-bit alignment. + cWriteDelay int64 // The cumulative duration of write delays + cWriteDelayN int32 // The cumulative number of write delays + inWritePaused int32 // The indicator whether write operation is paused by compaction + aliveSnaps, aliveIters int32 + + // Session. + s *session + + // MemDB. 
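+	// The active memtable (mem), the frozen one being flushed (frozenMem),
+	// and the journal backing the active memtable.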
+ memMu sync.RWMutex + memPool chan *memdb.DB + mem, frozenMem *memDB + journal *journal.Writer + journalWriter storage.Writer + journalFd storage.FileDesc + frozenJournalFd storage.FileDesc + frozenSeq uint64 + + // Snapshot. + snapsMu sync.Mutex + snapsList *list.List + + // Write. + batchPool sync.Pool + writeMergeC chan writeMerge + writeMergedC chan bool + writeLockC chan struct{} + writeAckC chan error + writeDelay time.Duration + writeDelayN int + tr *Transaction + + // Compaction. + compCommitLk sync.Mutex + tcompCmdC chan cCmd + tcompPauseC chan chan<- struct{} + mcompCmdC chan cCmd + compErrC chan error + compPerErrC chan error + compErrSetC chan error + compWriteLocking bool + compStats cStats + memdbMaxLevel int // For testing. + + // Close. + closeW sync.WaitGroup + closeC chan struct{} + closed uint32 + closer io.Closer +} + +func openDB(s *session) (*DB, error) { + s.log("db@open opening") + start := time.Now() + db := &DB{ + s: s, + // Initial sequence + seq: s.stSeqNum, + // MemDB + memPool: make(chan *memdb.DB, 1), + // Snapshot + snapsList: list.New(), + // Write + batchPool: sync.Pool{New: newBatch}, + writeMergeC: make(chan writeMerge), + writeMergedC: make(chan bool), + writeLockC: make(chan struct{}, 1), + writeAckC: make(chan error), + // Compaction + tcompCmdC: make(chan cCmd), + tcompPauseC: make(chan chan<- struct{}), + mcompCmdC: make(chan cCmd), + compErrC: make(chan error), + compPerErrC: make(chan error), + compErrSetC: make(chan error), + // Close + closeC: make(chan struct{}), + } + + // Read-only mode. + readOnly := s.o.GetReadOnly() + + if readOnly { + // Recover journals (read-only mode). + if err := db.recoverJournalRO(); err != nil { + return nil, err + } + } else { + // Recover journals. + if err := db.recoverJournal(); err != nil { + return nil, err + } + + // Remove any obsolete files. + if err := db.checkAndCleanFiles(); err != nil { + // Close journal. + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + } + return nil, err + } + + } + + // Doesn't need to be included in the wait group. + go db.compactionError() + go db.mpoolDrain() + + if readOnly { + db.SetReadOnly() + } else { + db.closeW.Add(2) + go db.tCompaction() + go db.mCompaction() + // go db.jWriter() + } + + s.logf("db@open done T·%v", time.Since(start)) + + runtime.SetFinalizer(db, (*DB).Close) + return db, nil +} + +// Open opens or creates a DB for the given storage. +// The DB will be created if not exist, unless ErrorIfMissing is true. +// Also, if ErrorIfExist is true and the DB exist Open will returns +// os.ErrExist error. +// +// Open will return an error with type of ErrCorrupted if corruption +// detected in the DB. Use errors.IsCorrupted to test whether an error is +// due to corruption. Corrupted DB can be recovered with Recover function. +// +// The returned DB instance is safe for concurrent use. +// The DB must be closed after use, by calling Close method. +func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) { + s, err := newSession(stor, o) + if err != nil { + return + } + defer func() { + if err != nil { + s.close() + s.release() + } + }() + + err = s.recover() + if err != nil { + if !os.IsNotExist(err) || s.o.GetErrorIfMissing() { + return + } + err = s.create() + if err != nil { + return + } + } else if s.o.GetErrorIfExist() { + err = os.ErrExist + return + } + + return openDB(s) +} + +// OpenFile opens or creates a DB for the given path. +// The DB will be created if not exist, unless ErrorIfMissing is true. 
+// Also, if ErrorIfExist is true and the DB exist OpenFile will returns +// os.ErrExist error. +// +// OpenFile uses standard file-system backed storage implementation as +// described in the leveldb/storage package. +// +// OpenFile will return an error with type of ErrCorrupted if corruption +// detected in the DB. Use errors.IsCorrupted to test whether an error is +// due to corruption. Corrupted DB can be recovered with Recover function. +// +// The returned DB instance is safe for concurrent use. +// The DB must be closed after use, by calling Close method. +func OpenFile(path string, o *opt.Options) (db *DB, err error) { + stor, err := storage.OpenFile(path, o.GetReadOnly()) + if err != nil { + return + } + db, err = Open(stor, o) + if err != nil { + stor.Close() + } else { + db.closer = stor + } + return +} + +// Recover recovers and opens a DB with missing or corrupted manifest files +// for the given storage. It will ignore any manifest files, valid or not. +// The DB must already exist or it will returns an error. +// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. +// +// The returned DB instance is safe for concurrent use. +// The DB must be closed after use, by calling Close method. +func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) { + s, err := newSession(stor, o) + if err != nil { + return + } + defer func() { + if err != nil { + s.close() + s.release() + } + }() + + err = recoverTable(s, o) + if err != nil { + return + } + return openDB(s) +} + +// RecoverFile recovers and opens a DB with missing or corrupted manifest files +// for the given path. It will ignore any manifest files, valid or not. +// The DB must already exist or it will returns an error. +// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. +// +// RecoverFile uses standard file-system backed storage implementation as described +// in the leveldb/storage package. +// +// The returned DB instance is safe for concurrent use. +// The DB must be closed after use, by calling Close method. +func RecoverFile(path string, o *opt.Options) (db *DB, err error) { + stor, err := storage.OpenFile(path, false) + if err != nil { + return + } + db, err = Recover(stor, o) + if err != nil { + stor.Close() + } else { + db.closer = stor + } + return +} + +func recoverTable(s *session, o *opt.Options) error { + o = dupOptions(o) + // Mask StrictReader, lets StrictRecovery doing its job. + o.Strict &= ^opt.StrictReader + + // Get all tables and sort it by file number. + fds, err := s.stor.List(storage.TypeTable) + if err != nil { + return err + } + sortFds(fds) + + var ( + maxSeq uint64 + recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int + + // We will drop corrupted table. + strict = o.GetStrict(opt.StrictRecovery) + noSync = o.GetNoSync() + + rec = &sessionRecord{} + bpool = util.NewBufferPool(o.GetBlockSize() + 5) + ) + buildTable := func(iter iterator.Iterator) (tmpFd storage.FileDesc, size int64, err error) { + tmpFd = s.newTemp() + writer, err := s.stor.Create(tmpFd) + if err != nil { + return + } + defer func() { + writer.Close() + if err != nil { + s.stor.Remove(tmpFd) + tmpFd = storage.FileDesc{} + } + }() + + // Copy entries. 
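+		// Only keys that still parse as valid internal keys are appended to
+		// the replacement table; corrupted entries are dropped here.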
+ tw := table.NewWriter(writer, o) + for iter.Next() { + key := iter.Key() + if validInternalKey(key) { + err = tw.Append(key, iter.Value()) + if err != nil { + return + } + } + } + err = iter.Error() + if err != nil && !errors.IsCorrupted(err) { + return + } + err = tw.Close() + if err != nil { + return + } + if !noSync { + err = writer.Sync() + if err != nil { + return + } + } + size = int64(tw.BytesLen()) + return + } + recoverTable := func(fd storage.FileDesc) error { + s.logf("table@recovery recovering @%d", fd.Num) + reader, err := s.stor.Open(fd) + if err != nil { + return err + } + var closed bool + defer func() { + if !closed { + reader.Close() + } + }() + + // Get file size. + size, err := reader.Seek(0, 2) + if err != nil { + return err + } + + var ( + tSeq uint64 + tgoodKey, tcorruptedKey, tcorruptedBlock int + imin, imax []byte + ) + tr, err := table.NewReader(reader, size, fd, nil, bpool, o) + if err != nil { + return err + } + iter := tr.NewIterator(nil, nil) + if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok { + itererr.SetErrorCallback(func(err error) { + if errors.IsCorrupted(err) { + s.logf("table@recovery block corruption @%d %q", fd.Num, err) + tcorruptedBlock++ + } + }) + } + + // Scan the table. + for iter.Next() { + key := iter.Key() + _, seq, _, kerr := parseInternalKey(key) + if kerr != nil { + tcorruptedKey++ + continue + } + tgoodKey++ + if seq > tSeq { + tSeq = seq + } + if imin == nil { + imin = append([]byte{}, key...) + } + imax = append(imax[:0], key...) + } + if err := iter.Error(); err != nil && !errors.IsCorrupted(err) { + iter.Release() + return err + } + iter.Release() + + goodKey += tgoodKey + corruptedKey += tcorruptedKey + corruptedBlock += tcorruptedBlock + + if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) { + droppedTable++ + s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) + return nil + } + + if tgoodKey > 0 { + if tcorruptedKey > 0 || tcorruptedBlock > 0 { + // Rebuild the table. + s.logf("table@recovery rebuilding @%d", fd.Num) + iter := tr.NewIterator(nil, nil) + tmpFd, newSize, err := buildTable(iter) + iter.Release() + if err != nil { + return err + } + closed = true + reader.Close() + if err := s.stor.Rename(tmpFd, fd); err != nil { + return err + } + size = newSize + } + if tSeq > maxSeq { + maxSeq = tSeq + } + recoveredKey += tgoodKey + // Add table to level 0. + rec.addTable(0, fd.Num, size, imin, imax) + s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) + } else { + droppedTable++ + s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", fd.Num, tcorruptedKey, tcorruptedBlock, size) + } + + return nil + } + + // Recover all tables. + if len(fds) > 0 { + s.logf("table@recovery F·%d", len(fds)) + + // Mark file number as used. + s.markFileNum(fds[len(fds)-1].Num) + + for _, fd := range fds { + if err := recoverTable(fd); err != nil { + return err + } + } + + s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(fds), recoveredKey, goodKey, corruptedKey, maxSeq) + } + + // Set sequence number. + rec.setSeqNum(maxSeq) + + // Create new manifest. + if err := s.create(); err != nil { + return err + } + + // Commit. + return s.commit(rec) +} + +func (db *DB) recoverJournal() error { + // Get all journals and sort it by file number. 
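+	// Recovered batches are replayed into a scratch memdb that is flushed to
+	// a level-0 table whenever it exceeds the write buffer, and the resulting
+	// sequence number is committed to the manifest.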
+ rawFds, err := db.s.stor.List(storage.TypeJournal) + if err != nil { + return err + } + sortFds(rawFds) + + // Journals that will be recovered. + var fds []storage.FileDesc + for _, fd := range rawFds { + if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum { + fds = append(fds, fd) + } + } + + var ( + ofd storage.FileDesc // Obsolete file. + rec = &sessionRecord{} + ) + + // Recover journals. + if len(fds) > 0 { + db.logf("journal@recovery F·%d", len(fds)) + + // Mark file number as used. + db.s.markFileNum(fds[len(fds)-1].Num) + + var ( + // Options. + strict = db.s.o.GetStrict(opt.StrictJournal) + checksum = db.s.o.GetStrict(opt.StrictJournalChecksum) + writeBuffer = db.s.o.GetWriteBuffer() + + jr *journal.Reader + mdb = memdb.New(db.s.icmp, writeBuffer) + buf = &util.Buffer{} + batchSeq uint64 + batchLen int + ) + + for _, fd := range fds { + db.logf("journal@recovery recovering @%d", fd.Num) + + fr, err := db.s.stor.Open(fd) + if err != nil { + return err + } + + // Create or reset journal reader instance. + if jr == nil { + jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) + } else { + jr.Reset(fr, dropper{db.s, fd}, strict, checksum) + } + + // Flush memdb and remove obsolete journal file. + if !ofd.Zero() { + if mdb.Len() > 0 { + if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { + fr.Close() + return err + } + } + + rec.setJournalNum(fd.Num) + rec.setSeqNum(db.seq) + if err := db.s.commit(rec); err != nil { + fr.Close() + return err + } + rec.resetAddedTables() + + db.s.stor.Remove(ofd) + ofd = storage.FileDesc{} + } + + // Replay journal to memdb. + mdb.Reset() + for { + r, err := jr.Next() + if err != nil { + if err == io.EOF { + break + } + + fr.Close() + return errors.SetFd(err, fd) + } + + buf.Reset() + if _, err := buf.ReadFrom(r); err != nil { + if err == io.ErrUnexpectedEOF { + // This is error returned due to corruption, with strict == false. + continue + } + + fr.Close() + return errors.SetFd(err, fd) + } + batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb) + if err != nil { + if !strict && errors.IsCorrupted(err) { + db.s.logf("journal error: %v (skipped)", err) + // We won't apply sequence number as it might be corrupted. + continue + } + + fr.Close() + return errors.SetFd(err, fd) + } + + // Save sequence number. + db.seq = batchSeq + uint64(batchLen) + + // Flush it if large enough. + if mdb.Size() >= writeBuffer { + if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { + fr.Close() + return err + } + + mdb.Reset() + } + } + + fr.Close() + ofd = fd + } + + // Flush the last memdb. + if mdb.Len() > 0 { + if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { + return err + } + } + } + + // Create a new journal. + if _, err := db.newMem(0); err != nil { + return err + } + + // Commit. + rec.setJournalNum(db.journalFd.Num) + rec.setSeqNum(db.seq) + if err := db.s.commit(rec); err != nil { + // Close journal on error. + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + } + return err + } + + // Remove the last obsolete journal file. + if !ofd.Zero() { + db.s.stor.Remove(ofd) + } + + return nil +} + +func (db *DB) recoverJournalRO() error { + // Get all journals and sort it by file number. + rawFds, err := db.s.stor.List(storage.TypeJournal) + if err != nil { + return err + } + sortFds(rawFds) + + // Journals that will be recovered. 
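+	// That is, journals numbered at or after the one recorded in the
+	// manifest, plus the recorded previous journal; anything older is already
+	// reflected in the tables.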
+ var fds []storage.FileDesc + for _, fd := range rawFds { + if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum { + fds = append(fds, fd) + } + } + + var ( + // Options. + strict = db.s.o.GetStrict(opt.StrictJournal) + checksum = db.s.o.GetStrict(opt.StrictJournalChecksum) + writeBuffer = db.s.o.GetWriteBuffer() + + mdb = memdb.New(db.s.icmp, writeBuffer) + ) + + // Recover journals. + if len(fds) > 0 { + db.logf("journal@recovery RO·Mode F·%d", len(fds)) + + var ( + jr *journal.Reader + buf = &util.Buffer{} + batchSeq uint64 + batchLen int + ) + + for _, fd := range fds { + db.logf("journal@recovery recovering @%d", fd.Num) + + fr, err := db.s.stor.Open(fd) + if err != nil { + return err + } + + // Create or reset journal reader instance. + if jr == nil { + jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) + } else { + jr.Reset(fr, dropper{db.s, fd}, strict, checksum) + } + + // Replay journal to memdb. + for { + r, err := jr.Next() + if err != nil { + if err == io.EOF { + break + } + + fr.Close() + return errors.SetFd(err, fd) + } + + buf.Reset() + if _, err := buf.ReadFrom(r); err != nil { + if err == io.ErrUnexpectedEOF { + // This is error returned due to corruption, with strict == false. + continue + } + + fr.Close() + return errors.SetFd(err, fd) + } + batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb) + if err != nil { + if !strict && errors.IsCorrupted(err) { + db.s.logf("journal error: %v (skipped)", err) + // We won't apply sequence number as it might be corrupted. + continue + } + + fr.Close() + return errors.SetFd(err, fd) + } + + // Save sequence number. + db.seq = batchSeq + uint64(batchLen) + } + + fr.Close() + } + } + + // Set memDB. + db.mem = &memDB{db: db, DB: mdb, ref: 1} + + return nil +} + +func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) { + mk, mv, err := mdb.Find(ikey) + if err == nil { + ukey, _, kt, kerr := parseInternalKey(mk) + if kerr != nil { + // Shouldn't have had happen. + panic(kerr) + } + if icmp.uCompare(ukey, ikey.ukey()) == 0 { + if kt == keyTypeDel { + return true, nil, ErrNotFound + } + return true, mv, nil + + } + } else if err != ErrNotFound { + return true, nil, err + } + return +} + +func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { + ikey := makeInternalKey(nil, key, seq, keyTypeSeek) + + if auxm != nil { + if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok { + return append([]byte{}, mv...), me + } + } + + em, fm := db.getMems() + for _, m := range [...]*memDB{em, fm} { + if m == nil { + continue + } + defer m.decref() + + if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok { + return append([]byte{}, mv...), me + } + } + + v := db.s.version() + value, cSched, err := v.get(auxt, ikey, ro, false) + v.release() + if cSched { + // Trigger table compaction. 
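+		// The read path only nudges the table-compaction goroutine started in
+		// openDB; the compaction itself runs asynchronously.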
+ db.compTrigger(db.tcompCmdC) + } + return +} + +func nilIfNotFound(err error) error { + if err == ErrNotFound { + return nil + } + return err +} + +func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) { + ikey := makeInternalKey(nil, key, seq, keyTypeSeek) + + if auxm != nil { + if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok { + return me == nil, nilIfNotFound(me) + } + } + + em, fm := db.getMems() + for _, m := range [...]*memDB{em, fm} { + if m == nil { + continue + } + defer m.decref() + + if ok, _, me := memGet(m.DB, ikey, db.s.icmp); ok { + return me == nil, nilIfNotFound(me) + } + } + + v := db.s.version() + _, cSched, err := v.get(auxt, ikey, ro, true) + v.release() + if cSched { + // Trigger table compaction. + db.compTrigger(db.tcompCmdC) + } + if err == nil { + ret = true + } else if err == ErrNotFound { + err = nil + } + return +} + +// Get gets the value for the given key. It returns ErrNotFound if the +// DB does not contains the key. +// +// The returned slice is its own copy, it is safe to modify the contents +// of the returned slice. +// It is safe to modify the contents of the argument after Get returns. +func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { + err = db.ok() + if err != nil { + return + } + + se := db.acquireSnapshot() + defer db.releaseSnapshot(se) + return db.get(nil, nil, key, se.seq, ro) +} + +// Has returns true if the DB does contains the given key. +// +// It is safe to modify the contents of the argument after Has returns. +func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) { + err = db.ok() + if err != nil { + return + } + + se := db.acquireSnapshot() + defer db.releaseSnapshot(se) + return db.has(nil, nil, key, se.seq, ro) +} + +// NewIterator returns an iterator for the latest snapshot of the +// underlying DB. +// The returned iterator is not safe for concurrent use, but it is safe to use +// multiple iterators concurrently, with each in a dedicated goroutine. +// It is also safe to use an iterator concurrently with modifying its +// underlying DB. The resultant key/value pairs are guaranteed to be +// consistent. +// +// Slice allows slicing the iterator to only contains keys in the given +// range. A nil Range.Start is treated as a key before all keys in the +// DB. And a nil Range.Limit is treated as a key after all keys in +// the DB. +// +// The iterator must be released after use, by calling Release method. +// +// Also read Iterator documentation of the leveldb/iterator package. +func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + if err := db.ok(); err != nil { + return iterator.NewEmptyIterator(err) + } + + se := db.acquireSnapshot() + defer db.releaseSnapshot(se) + // Iterator holds 'version' lock, 'version' is immutable so snapshot + // can be released after iterator created. + return db.newIterator(nil, nil, se.seq, slice, ro) +} + +// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot +// is a frozen snapshot of a DB state at a particular point in time. The +// content of snapshot are guaranteed to be consistent. +// +// The snapshot must be released after use, by calling Release method. +func (db *DB) GetSnapshot() (*Snapshot, error) { + if err := db.ok(); err != nil { + return nil, err + } + + return db.newSnapshot(), nil +} + +// GetProperty returns value of the given property name. 
+// +// Property names: +// leveldb.num-files-at-level{n} +// Returns the number of files at level 'n'. +// leveldb.stats +// Returns statistics of the underlying DB. +// leveldb.iostats +// Returns statistics of effective disk read and write. +// leveldb.writedelay +// Returns cumulative write delay caused by compaction. +// leveldb.sstables +// Returns sstables list for each level. +// leveldb.blockpool +// Returns block pool stats. +// leveldb.cachedblock +// Returns size of cached block. +// leveldb.openedtables +// Returns number of opened tables. +// leveldb.alivesnaps +// Returns number of alive snapshots. +// leveldb.aliveiters +// Returns number of alive iterators. +func (db *DB) GetProperty(name string) (value string, err error) { + err = db.ok() + if err != nil { + return + } + + const prefix = "leveldb." + if !strings.HasPrefix(name, prefix) { + return "", ErrNotFound + } + p := name[len(prefix):] + + v := db.s.version() + defer v.release() + + numFilesPrefix := "num-files-at-level" + switch { + case strings.HasPrefix(p, numFilesPrefix): + var level uint + var rest string + n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest) + if n != 1 { + err = ErrNotFound + } else { + value = fmt.Sprint(v.tLen(int(level))) + } + case p == "stats": + value = "Compactions\n" + + " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" + + "-------+------------+---------------+---------------+---------------+---------------\n" + for level, tables := range v.levels { + duration, read, write := db.compStats.getStat(level) + if len(tables) == 0 && duration == 0 { + continue + } + value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n", + level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(), + float64(read)/1048576.0, float64(write)/1048576.0) + } + case p == "iostats": + value = fmt.Sprintf("Read(MB):%.5f Write(MB):%.5f", + float64(db.s.stor.reads())/1048576.0, + float64(db.s.stor.writes())/1048576.0) + case p == "writedelay": + writeDelayN, writeDelay := atomic.LoadInt32(&db.cWriteDelayN), time.Duration(atomic.LoadInt64(&db.cWriteDelay)) + paused := atomic.LoadInt32(&db.inWritePaused) == 1 + value = fmt.Sprintf("DelayN:%d Delay:%s Paused:%t", writeDelayN, writeDelay, paused) + case p == "sstables": + for level, tables := range v.levels { + value += fmt.Sprintf("--- level %d ---\n", level) + for _, t := range tables { + value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.fd.Num, t.size, t.imin, t.imax) + } + } + case p == "blockpool": + value = fmt.Sprintf("%v", db.s.tops.bpool) + case p == "cachedblock": + if db.s.tops.bcache != nil { + value = fmt.Sprintf("%d", db.s.tops.bcache.Size()) + } else { + value = "" + } + case p == "openedtables": + value = fmt.Sprintf("%d", db.s.tops.cache.Size()) + case p == "alivesnaps": + value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps)) + case p == "aliveiters": + value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters)) + default: + err = ErrNotFound + } + + return +} + +// DBStats is database statistics. +type DBStats struct { + WriteDelayCount int32 + WriteDelayDuration time.Duration + WritePaused bool + + AliveSnapshots int32 + AliveIterators int32 + + IOWrite uint64 + IORead uint64 + + BlockCacheSize int + OpenedTablesCount int + + LevelSizes []int64 + LevelTablesCounts []int + LevelRead []int64 + LevelWrite []int64 + LevelDurations []time.Duration +} + +// Stats populates s with database statistics. 
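+//
+// A minimal sketch, assuming db is a *DB returned by Open or OpenFile:
+//
+//	var s DBStats
+//	if err := db.Stats(&s); err == nil {
+//		log.Printf("write delays: %d (%v), block cache: %d bytes",
+//			s.WriteDelayCount, s.WriteDelayDuration, s.BlockCacheSize)
+//	}
+//
+// The same counters back the string-valued properties served by GetProperty.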
+func (db *DB) Stats(s *DBStats) error { + err := db.ok() + if err != nil { + return err + } + + s.IORead = db.s.stor.reads() + s.IOWrite = db.s.stor.writes() + s.WriteDelayCount = atomic.LoadInt32(&db.cWriteDelayN) + s.WriteDelayDuration = time.Duration(atomic.LoadInt64(&db.cWriteDelay)) + s.WritePaused = atomic.LoadInt32(&db.inWritePaused) == 1 + + s.OpenedTablesCount = db.s.tops.cache.Size() + if db.s.tops.bcache != nil { + s.BlockCacheSize = db.s.tops.bcache.Size() + } else { + s.BlockCacheSize = 0 + } + + s.AliveIterators = atomic.LoadInt32(&db.aliveIters) + s.AliveSnapshots = atomic.LoadInt32(&db.aliveSnaps) + + s.LevelDurations = s.LevelDurations[:0] + s.LevelRead = s.LevelRead[:0] + s.LevelWrite = s.LevelWrite[:0] + s.LevelSizes = s.LevelSizes[:0] + s.LevelTablesCounts = s.LevelTablesCounts[:0] + + v := db.s.version() + defer v.release() + + for level, tables := range v.levels { + duration, read, write := db.compStats.getStat(level) + if len(tables) == 0 && duration == 0 { + continue + } + s.LevelDurations = append(s.LevelDurations, duration) + s.LevelRead = append(s.LevelRead, read) + s.LevelWrite = append(s.LevelWrite, write) + s.LevelSizes = append(s.LevelSizes, tables.size()) + s.LevelTablesCounts = append(s.LevelTablesCounts, len(tables)) + } + + return nil +} + +// SizeOf calculates approximate sizes of the given key ranges. +// The length of the returned sizes are equal with the length of the given +// ranges. The returned sizes measure storage space usage, so if the user +// data compresses by a factor of ten, the returned sizes will be one-tenth +// the size of the corresponding user data size. +// The results may not include the sizes of recently written data. +func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) { + if err := db.ok(); err != nil { + return nil, err + } + + v := db.s.version() + defer v.release() + + sizes := make(Sizes, 0, len(ranges)) + for _, r := range ranges { + imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek) + imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek) + start, err := v.offsetOf(imin) + if err != nil { + return nil, err + } + limit, err := v.offsetOf(imax) + if err != nil { + return nil, err + } + var size int64 + if limit >= start { + size = limit - start + } + sizes = append(sizes, size) + } + + return sizes, nil +} + +// Close closes the DB. This will also releases any outstanding snapshot, +// abort any in-flight compaction and discard open transaction. +// +// It is not safe to close a DB until all outstanding iterators are released. +// It is valid to call Close multiple times. Other methods should not be +// called after the DB has been closed. +func (db *DB) Close() error { + if !db.setClosed() { + return ErrClosed + } + + start := time.Now() + db.log("db@close closing") + + // Clear the finalizer. + runtime.SetFinalizer(db, nil) + + // Get compaction error. + var err error + select { + case err = <-db.compErrC: + if err == ErrReadOnly { + err = nil + } + default: + } + + // Signal all goroutines. + close(db.closeC) + + // Discard open transaction. + if db.tr != nil { + db.tr.Discard() + } + + // Acquire writer lock. + db.writeLockC <- struct{}{} + + // Wait for all gorotines to exit. + db.closeW.Wait() + + // Closes journal. + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + db.journal = nil + db.journalWriter = nil + } + + if db.writeDelayN > 0 { + db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) + } + + // Close session. 
+ db.s.close() + db.logf("db@close done T·%v", time.Since(start)) + db.s.release() + + if db.closer != nil { + if err1 := db.closer.Close(); err == nil { + err = err1 + } + db.closer = nil + } + + // Clear memdbs. + db.clearMems() + + return err +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go new file mode 100644 index 00000000000..28e50906adb --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go @@ -0,0 +1,860 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "sync" + "time" + + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +var ( + errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") +) + +type cStat struct { + duration time.Duration + read int64 + write int64 +} + +func (p *cStat) add(n *cStatStaging) { + p.duration += n.duration + p.read += n.read + p.write += n.write +} + +func (p *cStat) get() (duration time.Duration, read, write int64) { + return p.duration, p.read, p.write +} + +type cStatStaging struct { + start time.Time + duration time.Duration + on bool + read int64 + write int64 +} + +func (p *cStatStaging) startTimer() { + if !p.on { + p.start = time.Now() + p.on = true + } +} + +func (p *cStatStaging) stopTimer() { + if p.on { + p.duration += time.Since(p.start) + p.on = false + } +} + +type cStats struct { + lk sync.Mutex + stats []cStat +} + +func (p *cStats) addStat(level int, n *cStatStaging) { + p.lk.Lock() + if level >= len(p.stats) { + newStats := make([]cStat, level+1) + copy(newStats, p.stats) + p.stats = newStats + } + p.stats[level].add(n) + p.lk.Unlock() +} + +func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) { + p.lk.Lock() + defer p.lk.Unlock() + if level < len(p.stats) { + return p.stats[level].get() + } + return +} + +func (db *DB) compactionError() { + var err error +noerr: + // No error. + for { + select { + case err = <-db.compErrSetC: + switch { + case err == nil: + case err == ErrReadOnly, errors.IsCorrupted(err): + goto hasperr + default: + goto haserr + } + case <-db.closeC: + return + } + } +haserr: + // Transient error. + for { + select { + case db.compErrC <- err: + case err = <-db.compErrSetC: + switch { + case err == nil: + goto noerr + case err == ErrReadOnly, errors.IsCorrupted(err): + goto hasperr + default: + } + case <-db.closeC: + return + } + } +hasperr: + // Persistent error. + for { + select { + case db.compErrC <- err: + case db.compPerErrC <- err: + case db.writeLockC <- struct{}{}: + // Hold write lock, so that write won't pass-through. + db.compWriteLocking = true + case <-db.closeC: + if db.compWriteLocking { + // We should release the lock or Close will hang. 
+ <-db.writeLockC + } + return + } + } +} + +type compactionTransactCounter int + +func (cnt *compactionTransactCounter) incr() { + *cnt++ +} + +type compactionTransactInterface interface { + run(cnt *compactionTransactCounter) error + revert() error +} + +func (db *DB) compactionTransact(name string, t compactionTransactInterface) { + defer func() { + if x := recover(); x != nil { + if x == errCompactionTransactExiting { + if err := t.revert(); err != nil { + db.logf("%s revert error %q", name, err) + } + } + panic(x) + } + }() + + const ( + backoffMin = 1 * time.Second + backoffMax = 8 * time.Second + backoffMul = 2 * time.Second + ) + var ( + backoff = backoffMin + backoffT = time.NewTimer(backoff) + lastCnt = compactionTransactCounter(0) + + disableBackoff = db.s.o.GetDisableCompactionBackoff() + ) + for n := 0; ; n++ { + // Check whether the DB is closed. + if db.isClosed() { + db.logf("%s exiting", name) + db.compactionExitTransact() + } else if n > 0 { + db.logf("%s retrying N·%d", name, n) + } + + // Execute. + cnt := compactionTransactCounter(0) + err := t.run(&cnt) + if err != nil { + db.logf("%s error I·%d %q", name, cnt, err) + } + + // Set compaction error status. + select { + case db.compErrSetC <- err: + case perr := <-db.compPerErrC: + if err != nil { + db.logf("%s exiting (persistent error %q)", name, perr) + db.compactionExitTransact() + } + case <-db.closeC: + db.logf("%s exiting", name) + db.compactionExitTransact() + } + if err == nil { + return + } + if errors.IsCorrupted(err) { + db.logf("%s exiting (corruption detected)", name) + db.compactionExitTransact() + } + + if !disableBackoff { + // Reset backoff duration if counter is advancing. + if cnt > lastCnt { + backoff = backoffMin + lastCnt = cnt + } + + // Backoff. + backoffT.Reset(backoff) + if backoff < backoffMax { + backoff *= backoffMul + if backoff > backoffMax { + backoff = backoffMax + } + } + select { + case <-backoffT.C: + case <-db.closeC: + db.logf("%s exiting", name) + db.compactionExitTransact() + } + } + } +} + +type compactionTransactFunc struct { + runFunc func(cnt *compactionTransactCounter) error + revertFunc func() error +} + +func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error { + return t.runFunc(cnt) +} + +func (t *compactionTransactFunc) revert() error { + if t.revertFunc != nil { + return t.revertFunc() + } + return nil +} + +func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) { + db.compactionTransact(name, &compactionTransactFunc{run, revert}) +} + +func (db *DB) compactionExitTransact() { + panic(errCompactionTransactExiting) +} + +func (db *DB) compactionCommit(name string, rec *sessionRecord) { + db.compCommitLk.Lock() + defer db.compCommitLk.Unlock() // Defer is necessary. + db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error { + return db.s.commit(rec) + }, nil) +} + +func (db *DB) memCompaction() { + mdb := db.getFrozenMem() + if mdb == nil { + return + } + defer mdb.decref() + + db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size())) + + // Don't compact empty memdb. + if mdb.Len() == 0 { + db.logf("memdb@flush skipping") + // drop frozen memdb + db.dropFrozenMem() + return + } + + // Pause table compaction. 
+ resumeC := make(chan struct{}) + select { + case db.tcompPauseC <- (chan<- struct{})(resumeC): + case <-db.compPerErrC: + close(resumeC) + resumeC = nil + case <-db.closeC: + db.compactionExitTransact() + } + + var ( + rec = &sessionRecord{} + stats = &cStatStaging{} + flushLevel int + ) + + // Generate tables. + db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) { + stats.startTimer() + flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel) + stats.stopTimer() + return + }, func() error { + for _, r := range rec.addedTables { + db.logf("memdb@flush revert @%d", r.num) + if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil { + return err + } + } + return nil + }) + + rec.setJournalNum(db.journalFd.Num) + rec.setSeqNum(db.frozenSeq) + + // Commit. + stats.startTimer() + db.compactionCommit("memdb", rec) + stats.stopTimer() + + db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration) + + for _, r := range rec.addedTables { + stats.write += r.size + } + db.compStats.addStat(flushLevel, stats) + + // Drop frozen memdb. + db.dropFrozenMem() + + // Resume table compaction. + if resumeC != nil { + select { + case <-resumeC: + close(resumeC) + case <-db.closeC: + db.compactionExitTransact() + } + } + + // Trigger table compaction. + db.compTrigger(db.tcompCmdC) +} + +type tableCompactionBuilder struct { + db *DB + s *session + c *compaction + rec *sessionRecord + stat0, stat1 *cStatStaging + + snapHasLastUkey bool + snapLastUkey []byte + snapLastSeq uint64 + snapIter int + snapKerrCnt int + snapDropCnt int + + kerrCnt int + dropCnt int + + minSeq uint64 + strict bool + tableSize int + + tw *tWriter +} + +func (b *tableCompactionBuilder) appendKV(key, value []byte) error { + // Create new table if not already. + if b.tw == nil { + // Check for pause event. + if b.db != nil { + select { + case ch := <-b.db.tcompPauseC: + b.db.pauseCompaction(ch) + case <-b.db.closeC: + b.db.compactionExitTransact() + default: + } + } + + // Create new table. + var err error + b.tw, err = b.s.tops.create() + if err != nil { + return err + } + } + + // Write key/value into table. + return b.tw.append(key, value) +} + +func (b *tableCompactionBuilder) needFlush() bool { + return b.tw.tw.BytesLen() >= b.tableSize +} + +func (b *tableCompactionBuilder) flush() error { + t, err := b.tw.finish() + if err != nil { + return err + } + b.rec.addTableFile(b.c.sourceLevel+1, t) + b.stat1.write += t.size + b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) + b.tw = nil + return nil +} + +func (b *tableCompactionBuilder) cleanup() { + if b.tw != nil { + b.tw.drop() + b.tw = nil + } +} + +func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error { + snapResumed := b.snapIter > 0 + hasLastUkey := b.snapHasLastUkey // The key might has zero length, so this is necessary. + lastUkey := append([]byte{}, b.snapLastUkey...) + lastSeq := b.snapLastSeq + b.kerrCnt = b.snapKerrCnt + b.dropCnt = b.snapDropCnt + // Restore compaction state. + b.c.restore() + + defer b.cleanup() + + b.stat1.startTimer() + defer b.stat1.stopTimer() + + iter := b.c.newIterator() + defer iter.Release() + for i := 0; iter.Next(); i++ { + // Incr transact counter. + cnt.incr() + + // Skip until last state. 
+ if i < b.snapIter { + continue + } + + resumed := false + if snapResumed { + resumed = true + snapResumed = false + } + + ikey := iter.Key() + ukey, seq, kt, kerr := parseInternalKey(ikey) + + if kerr == nil { + shouldStop := !resumed && b.c.shouldStopBefore(ikey) + + if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 { + // First occurrence of this user key. + + // Only rotate tables if ukey doesn't hop across. + if b.tw != nil && (shouldStop || b.needFlush()) { + if err := b.flush(); err != nil { + return err + } + + // Creates snapshot of the state. + b.c.save() + b.snapHasLastUkey = hasLastUkey + b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...) + b.snapLastSeq = lastSeq + b.snapIter = i + b.snapKerrCnt = b.kerrCnt + b.snapDropCnt = b.dropCnt + } + + hasLastUkey = true + lastUkey = append(lastUkey[:0], ukey...) + lastSeq = keyMaxSeq + } + + switch { + case lastSeq <= b.minSeq: + // Dropped because newer entry for same user key exist + fallthrough // (A) + case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey): + // For this user key: + // (1) there is no data in higher levels + // (2) data in lower levels will have larger seq numbers + // (3) data in layers that are being compacted here and have + // smaller seq numbers will be dropped in the next + // few iterations of this loop (by rule (A) above). + // Therefore this deletion marker is obsolete and can be dropped. + lastSeq = seq + b.dropCnt++ + continue + default: + lastSeq = seq + } + } else { + if b.strict { + return kerr + } + + // Don't drop corrupted keys. + hasLastUkey = false + lastUkey = lastUkey[:0] + lastSeq = keyMaxSeq + b.kerrCnt++ + } + + if err := b.appendKV(ikey, iter.Value()); err != nil { + return err + } + } + + if err := iter.Error(); err != nil { + return err + } + + // Finish last table. + if b.tw != nil && !b.tw.empty() { + return b.flush() + } + return nil +} + +func (b *tableCompactionBuilder) revert() error { + for _, at := range b.rec.addedTables { + b.s.logf("table@build revert @%d", at.num) + if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil { + return err + } + } + return nil +} + +func (db *DB) tableCompaction(c *compaction, noTrivial bool) { + defer c.release() + + rec := &sessionRecord{} + rec.addCompPtr(c.sourceLevel, c.imax) + + if !noTrivial && c.trivial() { + t := c.levels[0][0] + db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1) + rec.delTable(c.sourceLevel, t.fd.Num) + rec.addTableFile(c.sourceLevel+1, t) + db.compactionCommit("table-move", rec) + return + } + + var stats [2]cStatStaging + for i, tables := range c.levels { + for _, t := range tables { + stats[i].read += t.size + // Insert deleted tables into record + rec.delTable(c.sourceLevel+i, t.fd.Num) + } + } + sourceSize := int(stats[0].read + stats[1].read) + minSeq := db.minSeq() + db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq) + + b := &tableCompactionBuilder{ + db: db, + s: db.s, + c: c, + rec: rec, + stat1: &stats[1], + minSeq: minSeq, + strict: db.s.o.GetStrict(opt.StrictCompaction), + tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1), + } + db.compactionTransact("table@build", b) + + // Commit. 
+ stats[1].startTimer() + db.compactionCommit("table", rec) + stats[1].stopTimer() + + resultSize := int(stats[1].write) + db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration) + + // Save compaction stats + for i := range stats { + db.compStats.addStat(c.sourceLevel+1, &stats[i]) + } +} + +func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error { + db.logf("table@compaction range L%d %q:%q", level, umin, umax) + if level >= 0 { + if c := db.s.getCompactionRange(level, umin, umax, true); c != nil { + db.tableCompaction(c, true) + } + } else { + // Retry until nothing to compact. + for { + compacted := false + + // Scan for maximum level with overlapped tables. + v := db.s.version() + m := 1 + for i := m; i < len(v.levels); i++ { + tables := v.levels[i] + if tables.overlaps(db.s.icmp, umin, umax, false) { + m = i + } + } + v.release() + + for level := 0; level < m; level++ { + if c := db.s.getCompactionRange(level, umin, umax, false); c != nil { + db.tableCompaction(c, true) + compacted = true + } + } + + if !compacted { + break + } + } + } + + return nil +} + +func (db *DB) tableAutoCompaction() { + if c := db.s.pickCompaction(); c != nil { + db.tableCompaction(c, false) + } +} + +func (db *DB) tableNeedCompaction() bool { + v := db.s.version() + defer v.release() + return v.needCompaction() +} + +// resumeWrite returns an indicator whether we should resume write operation if enough level0 files are compacted. +func (db *DB) resumeWrite() bool { + v := db.s.version() + defer v.release() + if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() { + return true + } + return false +} + +func (db *DB) pauseCompaction(ch chan<- struct{}) { + select { + case ch <- struct{}{}: + case <-db.closeC: + db.compactionExitTransact() + } +} + +type cCmd interface { + ack(err error) +} + +type cAuto struct { + // Note for table compaction, an empty ackC represents it's a compaction waiting command. + ackC chan<- error +} + +func (r cAuto) ack(err error) { + if r.ackC != nil { + defer func() { + recover() + }() + r.ackC <- err + } +} + +type cRange struct { + level int + min, max []byte + ackC chan<- error +} + +func (r cRange) ack(err error) { + if r.ackC != nil { + defer func() { + recover() + }() + r.ackC <- err + } +} + +// This will trigger auto compaction but will not wait for it. +func (db *DB) compTrigger(compC chan<- cCmd) { + select { + case compC <- cAuto{}: + default: + } +} + +// This will trigger auto compaction and/or wait for all compaction to be done. +func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) { + ch := make(chan error) + defer close(ch) + // Send cmd. + select { + case compC <- cAuto{ch}: + case err = <-db.compErrC: + return + case <-db.closeC: + return ErrClosed + } + // Wait cmd. + select { + case err = <-ch: + case err = <-db.compErrC: + case <-db.closeC: + return ErrClosed + } + return err +} + +// Send range compaction request. +func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) { + ch := make(chan error) + defer close(ch) + // Send cmd. + select { + case compC <- cRange{level, min, max, ch}: + case err := <-db.compErrC: + return err + case <-db.closeC: + return ErrClosed + } + // Wait cmd. 
+ select { + case err = <-ch: + case err = <-db.compErrC: + case <-db.closeC: + return ErrClosed + } + return err +} + +func (db *DB) mCompaction() { + var x cCmd + + defer func() { + if x := recover(); x != nil { + if x != errCompactionTransactExiting { + panic(x) + } + } + if x != nil { + x.ack(ErrClosed) + } + db.closeW.Done() + }() + + for { + select { + case x = <-db.mcompCmdC: + switch x.(type) { + case cAuto: + db.memCompaction() + x.ack(nil) + x = nil + default: + panic("leveldb: unknown command") + } + case <-db.closeC: + return + } + } +} + +func (db *DB) tCompaction() { + var ( + x cCmd + ackQ, waitQ []cCmd + ) + + defer func() { + if x := recover(); x != nil { + if x != errCompactionTransactExiting { + panic(x) + } + } + for i := range ackQ { + ackQ[i].ack(ErrClosed) + ackQ[i] = nil + } + for i := range waitQ { + waitQ[i].ack(ErrClosed) + waitQ[i] = nil + } + if x != nil { + x.ack(ErrClosed) + } + db.closeW.Done() + }() + + for { + if db.tableNeedCompaction() { + select { + case x = <-db.tcompCmdC: + case ch := <-db.tcompPauseC: + db.pauseCompaction(ch) + continue + case <-db.closeC: + return + default: + } + // Resume write operation as soon as possible. + if len(waitQ) > 0 && db.resumeWrite() { + for i := range waitQ { + waitQ[i].ack(nil) + waitQ[i] = nil + } + waitQ = waitQ[:0] + } + } else { + for i := range ackQ { + ackQ[i].ack(nil) + ackQ[i] = nil + } + ackQ = ackQ[:0] + for i := range waitQ { + waitQ[i].ack(nil) + waitQ[i] = nil + } + waitQ = waitQ[:0] + select { + case x = <-db.tcompCmdC: + case ch := <-db.tcompPauseC: + db.pauseCompaction(ch) + continue + case <-db.closeC: + return + } + } + if x != nil { + switch cmd := x.(type) { + case cAuto: + if cmd.ackC != nil { + waitQ = append(waitQ, x) + } else { + ackQ = append(ackQ, x) + } + case cRange: + x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)) + default: + panic("leveldb: unknown command") + } + x = nil + } + db.tableAutoCompaction() + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go new file mode 100644 index 00000000000..03c24cdab50 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go @@ -0,0 +1,360 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package leveldb + +import ( + "errors" + "math/rand" + "runtime" + "sync" + "sync/atomic" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key") +) + +type memdbReleaser struct { + once sync.Once + m *memDB +} + +func (mr *memdbReleaser) Release() { + mr.once.Do(func() { + mr.m.decref() + }) +} + +func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader) + em, fm := db.getMems() + v := db.s.version() + + tableIts := v.getIterators(slice, ro) + n := len(tableIts) + len(auxt) + 3 + its := make([]iterator.Iterator, 0, n) + + if auxm != nil { + ami := auxm.NewIterator(slice) + ami.SetReleaser(&memdbReleaser{m: auxm}) + its = append(its, ami) + } + for _, t := range auxt { + its = append(its, v.s.tops.newIterator(t, slice, ro)) + } + + emi := em.NewIterator(slice) + emi.SetReleaser(&memdbReleaser{m: em}) + its = append(its, emi) + if fm != nil { + fmi := fm.NewIterator(slice) + fmi.SetReleaser(&memdbReleaser{m: fm}) + its = append(its, fmi) + } + its = append(its, tableIts...) + mi := iterator.NewMergedIterator(its, db.s.icmp, strict) + mi.SetReleaser(&versionReleaser{v: v}) + return mi +} + +func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { + var islice *util.Range + if slice != nil { + islice = &util.Range{} + if slice.Start != nil { + islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek) + } + if slice.Limit != nil { + islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek) + } + } + rawIter := db.newRawIterator(auxm, auxt, islice, ro) + iter := &dbIter{ + db: db, + icmp: db.s.icmp, + iter: rawIter, + seq: seq, + strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader), + key: make([]byte, 0), + value: make([]byte, 0), + } + atomic.AddInt32(&db.aliveIters, 1) + runtime.SetFinalizer(iter, (*dbIter).Release) + return iter +} + +func (db *DB) iterSamplingRate() int { + return rand.Intn(2 * db.s.o.GetIteratorSamplingRate()) +} + +type dir int + +const ( + dirReleased dir = iota - 1 + dirSOI + dirEOI + dirBackward + dirForward +) + +// dbIter represent an interator states over a database session. 
+type dbIter struct { + db *DB + icmp *iComparer + iter iterator.Iterator + seq uint64 + strict bool + + smaplingGap int + dir dir + key []byte + value []byte + err error + releaser util.Releaser +} + +func (i *dbIter) sampleSeek() { + ikey := i.iter.Key() + i.smaplingGap -= len(ikey) + len(i.iter.Value()) + for i.smaplingGap < 0 { + i.smaplingGap += i.db.iterSamplingRate() + i.db.sampleSeek(ikey) + } +} + +func (i *dbIter) setErr(err error) { + i.err = err + i.key = nil + i.value = nil +} + +func (i *dbIter) iterErr() { + if err := i.iter.Error(); err != nil { + i.setErr(err) + } +} + +func (i *dbIter) Valid() bool { + return i.err == nil && i.dir > dirEOI +} + +func (i *dbIter) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.iter.First() { + i.dir = dirSOI + return i.next() + } + i.dir = dirEOI + i.iterErr() + return false +} + +func (i *dbIter) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.iter.Last() { + return i.prev() + } + i.dir = dirSOI + i.iterErr() + return false +} + +func (i *dbIter) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek) + if i.iter.Seek(ikey) { + i.dir = dirSOI + return i.next() + } + i.dir = dirEOI + i.iterErr() + return false +} + +func (i *dbIter) next() bool { + for { + if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil { + i.sampleSeek() + if seq <= i.seq { + switch kt { + case keyTypeDel: + // Skip deleted key. + i.key = append(i.key[:0], ukey...) + i.dir = dirForward + case keyTypeVal: + if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 { + i.key = append(i.key[:0], ukey...) + i.value = append(i.value[:0], i.iter.Value()...) + i.dir = dirForward + return true + } + } + } + } else if i.strict { + i.setErr(kerr) + break + } + if !i.iter.Next() { + i.dir = dirEOI + i.iterErr() + break + } + } + return false +} + +func (i *dbIter) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) { + i.dir = dirEOI + i.iterErr() + return false + } + return i.next() +} + +func (i *dbIter) prev() bool { + i.dir = dirBackward + del := true + if i.iter.Valid() { + for { + if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil { + i.sampleSeek() + if seq <= i.seq { + if !del && i.icmp.uCompare(ukey, i.key) < 0 { + return true + } + del = (kt == keyTypeDel) + if !del { + i.key = append(i.key[:0], ukey...) + i.value = append(i.value[:0], i.iter.Value()...) 
+ } + } + } else if i.strict { + i.setErr(kerr) + return false + } + if !i.iter.Prev() { + break + } + } + } + if del { + i.dir = dirSOI + i.iterErr() + return false + } + return true +} + +func (i *dbIter) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirEOI: + return i.Last() + case dirForward: + for i.iter.Prev() { + if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil { + i.sampleSeek() + if i.icmp.uCompare(ukey, i.key) < 0 { + goto cont + } + } else if i.strict { + i.setErr(kerr) + return false + } + } + i.dir = dirSOI + i.iterErr() + return false + } + +cont: + return i.prev() +} + +func (i *dbIter) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.key +} + +func (i *dbIter) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.value +} + +func (i *dbIter) Release() { + if i.dir != dirReleased { + // Clear the finalizer. + runtime.SetFinalizer(i, nil) + + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + + i.dir = dirReleased + i.key = nil + i.value = nil + i.iter.Release() + i.iter = nil + atomic.AddInt32(&i.db.aliveIters, -1) + i.db = nil + } +} + +func (i *dbIter) SetReleaser(releaser util.Releaser) { + if i.dir == dirReleased { + panic(util.ErrReleased) + } + if i.releaser != nil && releaser != nil { + panic(util.ErrHasReleaser) + } + i.releaser = releaser +} + +func (i *dbIter) Error() error { + return i.err +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go new file mode 100644 index 00000000000..2c69d2e531d --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go @@ -0,0 +1,183 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "container/list" + "fmt" + "runtime" + "sync" + "sync/atomic" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +type snapshotElement struct { + seq uint64 + ref int + e *list.Element +} + +// Acquires a snapshot, based on latest sequence. +func (db *DB) acquireSnapshot() *snapshotElement { + db.snapsMu.Lock() + defer db.snapsMu.Unlock() + + seq := db.getSeq() + + if e := db.snapsList.Back(); e != nil { + se := e.Value.(*snapshotElement) + if se.seq == seq { + se.ref++ + return se + } else if seq < se.seq { + panic("leveldb: sequence number is not increasing") + } + } + se := &snapshotElement{seq: seq, ref: 1} + se.e = db.snapsList.PushBack(se) + return se +} + +// Releases given snapshot element. +func (db *DB) releaseSnapshot(se *snapshotElement) { + db.snapsMu.Lock() + defer db.snapsMu.Unlock() + + se.ref-- + if se.ref == 0 { + db.snapsList.Remove(se.e) + se.e = nil + } else if se.ref < 0 { + panic("leveldb: Snapshot: negative element reference") + } +} + +// Gets minimum sequence that not being snapshotted. +func (db *DB) minSeq() uint64 { + db.snapsMu.Lock() + defer db.snapsMu.Unlock() + + if e := db.snapsList.Front(); e != nil { + return e.Value.(*snapshotElement).seq + } + + return db.getSeq() +} + +// Snapshot is a DB snapshot. +type Snapshot struct { + db *DB + elem *snapshotElement + mu sync.RWMutex + released bool +} + +// Creates new snapshot object. 
+func (db *DB) newSnapshot() *Snapshot { + snap := &Snapshot{ + db: db, + elem: db.acquireSnapshot(), + } + atomic.AddInt32(&db.aliveSnaps, 1) + runtime.SetFinalizer(snap, (*Snapshot).Release) + return snap +} + +func (snap *Snapshot) String() string { + return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq) +} + +// Get gets the value for the given key. It returns ErrNotFound if +// the DB does not contains the key. +// +// The caller should not modify the contents of the returned slice, but +// it is safe to modify the contents of the argument after Get returns. +func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { + err = snap.db.ok() + if err != nil { + return + } + snap.mu.RLock() + defer snap.mu.RUnlock() + if snap.released { + err = ErrSnapshotReleased + return + } + return snap.db.get(nil, nil, key, snap.elem.seq, ro) +} + +// Has returns true if the DB does contains the given key. +// +// It is safe to modify the contents of the argument after Get returns. +func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) { + err = snap.db.ok() + if err != nil { + return + } + snap.mu.RLock() + defer snap.mu.RUnlock() + if snap.released { + err = ErrSnapshotReleased + return + } + return snap.db.has(nil, nil, key, snap.elem.seq, ro) +} + +// NewIterator returns an iterator for the snapshot of the underlying DB. +// The returned iterator is not safe for concurrent use, but it is safe to use +// multiple iterators concurrently, with each in a dedicated goroutine. +// It is also safe to use an iterator concurrently with modifying its +// underlying DB. The resultant key/value pairs are guaranteed to be +// consistent. +// +// Slice allows slicing the iterator to only contains keys in the given +// range. A nil Range.Start is treated as a key before all keys in the +// DB. And a nil Range.Limit is treated as a key after all keys in +// the DB. +// +// The iterator must be released after use, by calling Release method. +// Releasing the snapshot doesn't mean releasing the iterator too, the +// iterator would be still valid until released. +// +// Also read Iterator documentation of the leveldb/iterator package. +func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + if err := snap.db.ok(); err != nil { + return iterator.NewEmptyIterator(err) + } + snap.mu.Lock() + defer snap.mu.Unlock() + if snap.released { + return iterator.NewEmptyIterator(ErrSnapshotReleased) + } + // Since iterator already hold version ref, it doesn't need to + // hold snapshot ref. + return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro) +} + +// Release releases the snapshot. This will not release any returned +// iterators, the iterators would still be valid until released or the +// underlying DB is closed. +// +// Other methods should not be called after the snapshot has been released. +func (snap *Snapshot) Release() { + snap.mu.Lock() + defer snap.mu.Unlock() + + if !snap.released { + // Clear the finalizer. 
+ runtime.SetFinalizer(snap, nil) + + snap.released = true + snap.db.releaseSnapshot(snap.elem) + atomic.AddInt32(&snap.db.aliveSnaps, -1) + snap.db = nil + snap.elem = nil + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go new file mode 100644 index 00000000000..65e1c54bb41 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go @@ -0,0 +1,239 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + "sync/atomic" + "time" + + "github.com/syndtr/goleveldb/leveldb/journal" + "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +var ( + errHasFrozenMem = errors.New("has frozen mem") +) + +type memDB struct { + db *DB + *memdb.DB + ref int32 +} + +func (m *memDB) getref() int32 { + return atomic.LoadInt32(&m.ref) +} + +func (m *memDB) incref() { + atomic.AddInt32(&m.ref, 1) +} + +func (m *memDB) decref() { + if ref := atomic.AddInt32(&m.ref, -1); ref == 0 { + // Only put back memdb with std capacity. + if m.Capacity() == m.db.s.o.GetWriteBuffer() { + m.Reset() + m.db.mpoolPut(m.DB) + } + m.db = nil + m.DB = nil + } else if ref < 0 { + panic("negative memdb ref") + } +} + +// Get latest sequence number. +func (db *DB) getSeq() uint64 { + return atomic.LoadUint64(&db.seq) +} + +// Atomically adds delta to seq. +func (db *DB) addSeq(delta uint64) { + atomic.AddUint64(&db.seq, delta) +} + +func (db *DB) setSeq(seq uint64) { + atomic.StoreUint64(&db.seq, seq) +} + +func (db *DB) sampleSeek(ikey internalKey) { + v := db.s.version() + if v.sampleSeek(ikey) { + // Trigger table compaction. + db.compTrigger(db.tcompCmdC) + } + v.release() +} + +func (db *DB) mpoolPut(mem *memdb.DB) { + if !db.isClosed() { + select { + case db.memPool <- mem: + default: + } + } +} + +func (db *DB) mpoolGet(n int) *memDB { + var mdb *memdb.DB + select { + case mdb = <-db.memPool: + default: + } + if mdb == nil || mdb.Capacity() < n { + mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) + } + return &memDB{ + db: db, + DB: mdb, + } +} + +func (db *DB) mpoolDrain() { + ticker := time.NewTicker(30 * time.Second) + for { + select { + case <-ticker.C: + select { + case <-db.memPool: + default: + } + case <-db.closeC: + ticker.Stop() + // Make sure the pool is drained. + select { + case <-db.memPool: + case <-time.After(time.Second): + } + close(db.memPool) + return + } + } +} + +// Create new memdb and froze the old one; need external synchronization. +// newMem only called synchronously by the writer. +func (db *DB) newMem(n int) (mem *memDB, err error) { + fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()} + w, err := db.s.stor.Create(fd) + if err != nil { + db.s.reuseFileNum(fd.Num) + return + } + + db.memMu.Lock() + defer db.memMu.Unlock() + + if db.frozenMem != nil { + return nil, errHasFrozenMem + } + + if db.journal == nil { + db.journal = journal.NewWriter(w) + } else { + db.journal.Reset(w) + db.journalWriter.Close() + db.frozenJournalFd = db.journalFd + } + db.journalWriter = w + db.journalFd = fd + db.frozenMem = db.mem + mem = db.mpoolGet(n) + mem.incref() // for self + mem.incref() // for caller + db.mem = mem + // The seq only incremented by the writer. And whoever called newMem + // should hold write lock, so no need additional synchronization here. 
+ db.frozenSeq = db.seq + return +} + +// Get all memdbs. +func (db *DB) getMems() (e, f *memDB) { + db.memMu.RLock() + defer db.memMu.RUnlock() + if db.mem != nil { + db.mem.incref() + } else if !db.isClosed() { + panic("nil effective mem") + } + if db.frozenMem != nil { + db.frozenMem.incref() + } + return db.mem, db.frozenMem +} + +// Get effective memdb. +func (db *DB) getEffectiveMem() *memDB { + db.memMu.RLock() + defer db.memMu.RUnlock() + if db.mem != nil { + db.mem.incref() + } else if !db.isClosed() { + panic("nil effective mem") + } + return db.mem +} + +// Check whether we has frozen memdb. +func (db *DB) hasFrozenMem() bool { + db.memMu.RLock() + defer db.memMu.RUnlock() + return db.frozenMem != nil +} + +// Get frozen memdb. +func (db *DB) getFrozenMem() *memDB { + db.memMu.RLock() + defer db.memMu.RUnlock() + if db.frozenMem != nil { + db.frozenMem.incref() + } + return db.frozenMem +} + +// Drop frozen memdb; assume that frozen memdb isn't nil. +func (db *DB) dropFrozenMem() { + db.memMu.Lock() + if err := db.s.stor.Remove(db.frozenJournalFd); err != nil { + db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err) + } else { + db.logf("journal@remove removed @%d", db.frozenJournalFd.Num) + } + db.frozenJournalFd = storage.FileDesc{} + db.frozenMem.decref() + db.frozenMem = nil + db.memMu.Unlock() +} + +// Clear mems ptr; used by DB.Close(). +func (db *DB) clearMems() { + db.memMu.Lock() + db.mem = nil + db.frozenMem = nil + db.memMu.Unlock() +} + +// Set closed flag; return true if not already closed. +func (db *DB) setClosed() bool { + return atomic.CompareAndSwapUint32(&db.closed, 0, 1) +} + +// Check whether DB was closed. +func (db *DB) isClosed() bool { + return atomic.LoadUint32(&db.closed) != 0 +} + +// Check read ok status. +func (db *DB) ok() error { + if db.isClosed() { + return ErrClosed + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go new file mode 100644 index 00000000000..b8f7e7d21df --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go @@ -0,0 +1,325 @@ +// Copyright (c) 2016, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + "sync" + "time" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +var errTransactionDone = errors.New("leveldb: transaction already closed") + +// Transaction is the transaction handle. +type Transaction struct { + db *DB + lk sync.RWMutex + seq uint64 + mem *memDB + tables tFiles + ikScratch []byte + rec sessionRecord + stats cStatStaging + closed bool +} + +// Get gets the value for the given key. It returns ErrNotFound if the +// DB does not contains the key. +// +// The returned slice is its own copy, it is safe to modify the contents +// of the returned slice. +// It is safe to modify the contents of the argument after Get returns. +func (tr *Transaction) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) { + tr.lk.RLock() + defer tr.lk.RUnlock() + if tr.closed { + return nil, errTransactionDone + } + return tr.db.get(tr.mem.DB, tr.tables, key, tr.seq, ro) +} + +// Has returns true if the DB does contains the given key. +// +// It is safe to modify the contents of the argument after Has returns. 
+func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) { + tr.lk.RLock() + defer tr.lk.RUnlock() + if tr.closed { + return false, errTransactionDone + } + return tr.db.has(tr.mem.DB, tr.tables, key, tr.seq, ro) +} + +// NewIterator returns an iterator for the latest snapshot of the transaction. +// The returned iterator is not safe for concurrent use, but it is safe to use +// multiple iterators concurrently, with each in a dedicated goroutine. +// It is also safe to use an iterator concurrently while writes to the +// transaction. The resultant key/value pairs are guaranteed to be consistent. +// +// Slice allows slicing the iterator to only contains keys in the given +// range. A nil Range.Start is treated as a key before all keys in the +// DB. And a nil Range.Limit is treated as a key after all keys in +// the DB. +// +// The iterator must be released after use, by calling Release method. +// +// Also read Iterator documentation of the leveldb/iterator package. +func (tr *Transaction) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + tr.lk.RLock() + defer tr.lk.RUnlock() + if tr.closed { + return iterator.NewEmptyIterator(errTransactionDone) + } + tr.mem.incref() + return tr.db.newIterator(tr.mem, tr.tables, tr.seq, slice, ro) +} + +func (tr *Transaction) flush() error { + // Flush memdb. + if tr.mem.Len() != 0 { + tr.stats.startTimer() + iter := tr.mem.NewIterator(nil) + t, n, err := tr.db.s.tops.createFrom(iter) + iter.Release() + tr.stats.stopTimer() + if err != nil { + return err + } + if tr.mem.getref() == 1 { + tr.mem.Reset() + } else { + tr.mem.decref() + tr.mem = tr.db.mpoolGet(0) + tr.mem.incref() + } + tr.tables = append(tr.tables, t) + tr.rec.addTableFile(0, t) + tr.stats.write += t.size + tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax) + } + return nil +} + +func (tr *Transaction) put(kt keyType, key, value []byte) error { + tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt) + if tr.mem.Free() < len(tr.ikScratch)+len(value) { + if err := tr.flush(); err != nil { + return err + } + } + if err := tr.mem.Put(tr.ikScratch, value); err != nil { + return err + } + tr.seq++ + return nil +} + +// Put sets the value for the given key. It overwrites any previous value +// for that key; a DB is not a multi-map. +// Please note that the transaction is not compacted until committed, so if you +// writes 10 same keys, then those 10 same keys are in the transaction. +// +// It is safe to modify the contents of the arguments after Put returns. +func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error { + tr.lk.Lock() + defer tr.lk.Unlock() + if tr.closed { + return errTransactionDone + } + return tr.put(keyTypeVal, key, value) +} + +// Delete deletes the value for the given key. +// Please note that the transaction is not compacted until committed, so if you +// writes 10 same keys, then those 10 same keys are in the transaction. +// +// It is safe to modify the contents of the arguments after Delete returns. +func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error { + tr.lk.Lock() + defer tr.lk.Unlock() + if tr.closed { + return errTransactionDone + } + return tr.put(keyTypeDel, key, nil) +} + +// Write apply the given batch to the transaction. The batch will be applied +// sequentially. 
+// Please note that the transaction is not compacted until committed, so if you +// writes 10 same keys, then those 10 same keys are in the transaction. +// +// It is safe to modify the contents of the arguments after Write returns. +func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error { + if b == nil || b.Len() == 0 { + return nil + } + + tr.lk.Lock() + defer tr.lk.Unlock() + if tr.closed { + return errTransactionDone + } + return b.replayInternal(func(i int, kt keyType, k, v []byte) error { + return tr.put(kt, k, v) + }) +} + +func (tr *Transaction) setDone() { + tr.closed = true + tr.db.tr = nil + tr.mem.decref() + <-tr.db.writeLockC +} + +// Commit commits the transaction. If error is not nil, then the transaction is +// not committed, it can then either be retried or discarded. +// +// Other methods should not be called after transaction has been committed. +func (tr *Transaction) Commit() error { + if err := tr.db.ok(); err != nil { + return err + } + + tr.lk.Lock() + defer tr.lk.Unlock() + if tr.closed { + return errTransactionDone + } + if err := tr.flush(); err != nil { + // Return error, lets user decide either to retry or discard + // transaction. + return err + } + if len(tr.tables) != 0 { + // Committing transaction. + tr.rec.setSeqNum(tr.seq) + tr.db.compCommitLk.Lock() + tr.stats.startTimer() + var cerr error + for retry := 0; retry < 3; retry++ { + cerr = tr.db.s.commit(&tr.rec) + if cerr != nil { + tr.db.logf("transaction@commit error R·%d %q", retry, cerr) + select { + case <-time.After(time.Second): + case <-tr.db.closeC: + tr.db.logf("transaction@commit exiting") + tr.db.compCommitLk.Unlock() + return cerr + } + } else { + // Success. Set db.seq. + tr.db.setSeq(tr.seq) + break + } + } + tr.stats.stopTimer() + if cerr != nil { + // Return error, lets user decide either to retry or discard + // transaction. + return cerr + } + + // Update compaction stats. This is safe as long as we hold compCommitLk. + tr.db.compStats.addStat(0, &tr.stats) + + // Trigger table auto-compaction. + tr.db.compTrigger(tr.db.tcompCmdC) + tr.db.compCommitLk.Unlock() + + // Additionally, wait compaction when certain threshold reached. + // Ignore error, returns error only if transaction can't be committed. + tr.db.waitCompaction() + } + // Only mark as done if transaction committed successfully. + tr.setDone() + return nil +} + +func (tr *Transaction) discard() { + // Discard transaction. + for _, t := range tr.tables { + tr.db.logf("transaction@discard @%d", t.fd.Num) + if err1 := tr.db.s.stor.Remove(t.fd); err1 == nil { + tr.db.s.reuseFileNum(t.fd.Num) + } + } +} + +// Discard discards the transaction. +// +// Other methods should not be called after transaction has been discarded. +func (tr *Transaction) Discard() { + tr.lk.Lock() + if !tr.closed { + tr.discard() + tr.setDone() + } + tr.lk.Unlock() +} + +func (db *DB) waitCompaction() error { + if db.s.tLen(0) >= db.s.o.GetWriteL0PauseTrigger() { + return db.compTriggerWait(db.tcompCmdC) + } + return nil +} + +// OpenTransaction opens an atomic DB transaction. Only one transaction can be +// opened at a time. Subsequent call to Write and OpenTransaction will be blocked +// until in-flight transaction is committed or discarded. +// The returned transaction handle is safe for concurrent use. +// +// Transaction is expensive and can overwhelm compaction, especially if +// transaction size is small. Use with caution. +// +// The transaction must be closed once done, either by committing or discarding +// the transaction. 
+// Closing the DB will discard open transaction. +func (db *DB) OpenTransaction() (*Transaction, error) { + if err := db.ok(); err != nil { + return nil, err + } + + // The write happen synchronously. + select { + case db.writeLockC <- struct{}{}: + case err := <-db.compPerErrC: + return nil, err + case <-db.closeC: + return nil, ErrClosed + } + + if db.tr != nil { + panic("leveldb: has open transaction") + } + + // Flush current memdb. + if db.mem != nil && db.mem.Len() != 0 { + if _, err := db.rotateMem(0, true); err != nil { + return nil, err + } + } + + // Wait compaction when certain threshold reached. + if err := db.waitCompaction(); err != nil { + return nil, err + } + + tr := &Transaction{ + db: db, + seq: db.seq, + mem: db.mpoolGet(0), + } + tr.mem.incref() + db.tr = tr + return tr, nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go new file mode 100644 index 00000000000..7ecd960d2ce --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go @@ -0,0 +1,102 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// Reader is the interface that wraps basic Get and NewIterator methods. +// This interface implemented by both DB and Snapshot. +type Reader interface { + Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) + NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator +} + +// Sizes is list of size. +type Sizes []int64 + +// Sum returns sum of the sizes. +func (sizes Sizes) Sum() int64 { + var sum int64 + for _, size := range sizes { + sum += size + } + return sum +} + +// Logging. +func (db *DB) log(v ...interface{}) { db.s.log(v...) } +func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) } + +// Check and clean files. 
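A minimal sketch (illustrative only, not part of the vendored source) of the transaction API from db_transaction.go above: open, write, read back, then commit or discard; the path and keys are hypothetical.

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example-leveldb", nil) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Only one transaction can be open at a time; Write and OpenTransaction
	// block until it is committed or discarded.
	tr, err := db.OpenTransaction()
	if err != nil {
		log.Fatal(err)
	}

	if err := tr.Put([]byte("k1"), []byte("v1"), nil); err != nil {
		tr.Discard() // give the write lock back on failure
		log.Fatal(err)
	}

	// Reads inside the transaction see its own uncommitted writes.
	if v, err := tr.Get([]byte("k1"), nil); err == nil {
		fmt.Printf("k1 = %s\n", v)
	}

	if err := tr.Commit(); err != nil {
		// A failed Commit leaves the transaction open; it can be retried or discarded.
		tr.Discard()
		log.Fatal(err)
	}
}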
+func (db *DB) checkAndCleanFiles() error { + v := db.s.version() + defer v.release() + + tmap := make(map[int64]bool) + for _, tables := range v.levels { + for _, t := range tables { + tmap[t.fd.Num] = false + } + } + + fds, err := db.s.stor.List(storage.TypeAll) + if err != nil { + return err + } + + var nt int + var rem []storage.FileDesc + for _, fd := range fds { + keep := true + switch fd.Type { + case storage.TypeManifest: + keep = fd.Num >= db.s.manifestFd.Num + case storage.TypeJournal: + if !db.frozenJournalFd.Zero() { + keep = fd.Num >= db.frozenJournalFd.Num + } else { + keep = fd.Num >= db.journalFd.Num + } + case storage.TypeTable: + _, keep = tmap[fd.Num] + if keep { + tmap[fd.Num] = true + nt++ + } + } + + if !keep { + rem = append(rem, fd) + } + } + + if nt != len(tmap) { + var mfds []storage.FileDesc + for num, present := range tmap { + if !present { + mfds = append(mfds, storage.FileDesc{storage.TypeTable, num}) + db.logf("db@janitor table missing @%d", num) + } + } + return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds}) + } + + db.logf("db@janitor F·%d G·%d", len(fds), len(rem)) + for _, fd := range rem { + db.logf("db@janitor removing %s-%d", fd.Type, fd.Num) + if err := db.s.stor.Remove(fd); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go new file mode 100644 index 00000000000..db0c1bece1d --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go @@ -0,0 +1,464 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "sync/atomic" + "time" + + "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +func (db *DB) writeJournal(batches []*Batch, seq uint64, sync bool) error { + wr, err := db.journal.Next() + if err != nil { + return err + } + if err := writeBatchesWithHeader(wr, batches, seq); err != nil { + return err + } + if err := db.journal.Flush(); err != nil { + return err + } + if sync { + return db.journalWriter.Sync() + } + return nil +} + +func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) { + retryLimit := 3 +retry: + // Wait for pending memdb compaction. + err = db.compTriggerWait(db.mcompCmdC) + if err != nil { + return + } + retryLimit-- + + // Create new memdb and journal. + mem, err = db.newMem(n) + if err != nil { + if err == errHasFrozenMem { + if retryLimit <= 0 { + panic("BUG: still has frozen memdb") + } + goto retry + } + return + } + + // Schedule memdb compaction. + if wait { + err = db.compTriggerWait(db.mcompCmdC) + } else { + db.compTrigger(db.mcompCmdC) + } + return +} + +func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) { + delayed := false + slowdownTrigger := db.s.o.GetWriteL0SlowdownTrigger() + pauseTrigger := db.s.o.GetWriteL0PauseTrigger() + flush := func() (retry bool) { + mdb = db.getEffectiveMem() + if mdb == nil { + err = ErrClosed + return false + } + defer func() { + if retry { + mdb.decref() + mdb = nil + } + }() + tLen := db.s.tLen(0) + mdbFree = mdb.Free() + switch { + case tLen >= slowdownTrigger && !delayed: + delayed = true + time.Sleep(time.Millisecond) + case mdbFree >= n: + return false + case tLen >= pauseTrigger: + delayed = true + // Set the write paused flag explicitly. 
+ atomic.StoreInt32(&db.inWritePaused, 1) + err = db.compTriggerWait(db.tcompCmdC) + // Unset the write paused flag. + atomic.StoreInt32(&db.inWritePaused, 0) + if err != nil { + return false + } + default: + // Allow memdb to grow if it has no entry. + if mdb.Len() == 0 { + mdbFree = n + } else { + mdb.decref() + mdb, err = db.rotateMem(n, false) + if err == nil { + mdbFree = mdb.Free() + } else { + mdbFree = 0 + } + } + return false + } + return true + } + start := time.Now() + for flush() { + } + if delayed { + db.writeDelay += time.Since(start) + db.writeDelayN++ + } else if db.writeDelayN > 0 { + db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) + atomic.AddInt32(&db.cWriteDelayN, int32(db.writeDelayN)) + atomic.AddInt64(&db.cWriteDelay, int64(db.writeDelay)) + db.writeDelay = 0 + db.writeDelayN = 0 + } + return +} + +type writeMerge struct { + sync bool + batch *Batch + keyType keyType + key, value []byte +} + +func (db *DB) unlockWrite(overflow bool, merged int, err error) { + for i := 0; i < merged; i++ { + db.writeAckC <- err + } + if overflow { + // Pass lock to the next write (that failed to merge). + db.writeMergedC <- false + } else { + // Release lock. + <-db.writeLockC + } +} + +// ourBatch is batch that we can modify. +func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error { + // Try to flush memdb. This method would also trying to throttle writes + // if it is too fast and compaction cannot catch-up. + mdb, mdbFree, err := db.flush(batch.internalLen) + if err != nil { + db.unlockWrite(false, 0, err) + return err + } + defer mdb.decref() + + var ( + overflow bool + merged int + batches = []*Batch{batch} + ) + + if merge { + // Merge limit. + var mergeLimit int + if batch.internalLen > 128<<10 { + mergeLimit = (1 << 20) - batch.internalLen + } else { + mergeLimit = 128 << 10 + } + mergeCap := mdbFree - batch.internalLen + if mergeLimit > mergeCap { + mergeLimit = mergeCap + } + + merge: + for mergeLimit > 0 { + select { + case incoming := <-db.writeMergeC: + if incoming.batch != nil { + // Merge batch. + if incoming.batch.internalLen > mergeLimit { + overflow = true + break merge + } + batches = append(batches, incoming.batch) + mergeLimit -= incoming.batch.internalLen + } else { + // Merge put. + internalLen := len(incoming.key) + len(incoming.value) + 8 + if internalLen > mergeLimit { + overflow = true + break merge + } + if ourBatch == nil { + ourBatch = db.batchPool.Get().(*Batch) + ourBatch.Reset() + batches = append(batches, ourBatch) + } + // We can use same batch since concurrent write doesn't + // guarantee write order. + ourBatch.appendRec(incoming.keyType, incoming.key, incoming.value) + mergeLimit -= internalLen + } + sync = sync || incoming.sync + merged++ + db.writeMergedC <- true + + default: + break merge + } + } + } + + // Release ourBatch if any. + if ourBatch != nil { + defer db.batchPool.Put(ourBatch) + } + + // Seq number. + seq := db.seq + 1 + + // Write journal. + if err := db.writeJournal(batches, seq, sync); err != nil { + db.unlockWrite(overflow, merged, err) + return err + } + + // Put batches. + for _, batch := range batches { + if err := batch.putMem(seq, mdb.DB); err != nil { + panic(err) + } + seq += uint64(batch.Len()) + } + + // Incr seq number. + db.addSeq(uint64(batchesLen(batches))) + + // Rotate memdb if it's reach the threshold. + if batch.internalLen >= mdbFree { + db.rotateMem(0, false) + } + + db.unlockWrite(overflow, merged, nil) + return nil +} + +// Write apply the given batch to the DB. 
The batch records will be applied +// sequentially. Write might be used concurrently, when used concurrently and +// batch is small enough, write will try to merge the batches. Set NoWriteMerge +// option to true to disable write merge. +// +// It is safe to modify the contents of the arguments after Write returns but +// not before. Write will not modify content of the batch. +func (db *DB) Write(batch *Batch, wo *opt.WriteOptions) error { + if err := db.ok(); err != nil || batch == nil || batch.Len() == 0 { + return err + } + + // If the batch size is larger than write buffer, it may justified to write + // using transaction instead. Using transaction the batch will be written + // into tables directly, skipping the journaling. + if batch.internalLen > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() { + tr, err := db.OpenTransaction() + if err != nil { + return err + } + if err := tr.Write(batch, wo); err != nil { + tr.Discard() + return err + } + return tr.Commit() + } + + merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge() + sync := wo.GetSync() && !db.s.o.GetNoSync() + + // Acquire write lock. + if merge { + select { + case db.writeMergeC <- writeMerge{sync: sync, batch: batch}: + if <-db.writeMergedC { + // Write is merged. + return <-db.writeAckC + } + // Write is not merged, the write lock is handed to us. Continue. + case db.writeLockC <- struct{}{}: + // Write lock acquired. + case err := <-db.compPerErrC: + // Compaction error. + return err + case <-db.closeC: + // Closed + return ErrClosed + } + } else { + select { + case db.writeLockC <- struct{}{}: + // Write lock acquired. + case err := <-db.compPerErrC: + // Compaction error. + return err + case <-db.closeC: + // Closed + return ErrClosed + } + } + + return db.writeLocked(batch, nil, merge, sync) +} + +func (db *DB) putRec(kt keyType, key, value []byte, wo *opt.WriteOptions) error { + if err := db.ok(); err != nil { + return err + } + + merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge() + sync := wo.GetSync() && !db.s.o.GetNoSync() + + // Acquire write lock. + if merge { + select { + case db.writeMergeC <- writeMerge{sync: sync, keyType: kt, key: key, value: value}: + if <-db.writeMergedC { + // Write is merged. + return <-db.writeAckC + } + // Write is not merged, the write lock is handed to us. Continue. + case db.writeLockC <- struct{}{}: + // Write lock acquired. + case err := <-db.compPerErrC: + // Compaction error. + return err + case <-db.closeC: + // Closed + return ErrClosed + } + } else { + select { + case db.writeLockC <- struct{}{}: + // Write lock acquired. + case err := <-db.compPerErrC: + // Compaction error. + return err + case <-db.closeC: + // Closed + return ErrClosed + } + } + + batch := db.batchPool.Get().(*Batch) + batch.Reset() + batch.appendRec(kt, key, value) + return db.writeLocked(batch, batch, merge, sync) +} + +// Put sets the value for the given key. It overwrites any previous value +// for that key; a DB is not a multi-map. Write merge also applies for Put, see +// Write. +// +// It is safe to modify the contents of the arguments after Put returns but not +// before. +func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error { + return db.putRec(keyTypeVal, key, value, wo) +} + +// Delete deletes the value for the given key. Delete will not returns error if +// key doesn't exist. Write merge also applies for Delete, see Write. +// +// It is safe to modify the contents of the arguments after Delete returns but +// not before. 
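A minimal sketch (illustrative only, not part of the vendored source) of Put, Delete, and batched Write with explicit opt.WriteOptions, exercising the Sync and NoWriteMerge behavior described in the comments above; path and keys are hypothetical.

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example-leveldb", nil) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// A durable single write: sync the journal and skip write merging.
	wo := &opt.WriteOptions{Sync: true, NoWriteMerge: true}
	if err := db.Put([]byte("meta"), []byte("v1"), wo); err != nil {
		log.Fatal(err)
	}

	// With default options, small concurrent writes may be merged into a
	// single journal entry, as described in the Write documentation above.
	if err := db.Delete([]byte("meta"), nil); err != nil {
		log.Fatal(err)
	}

	// A batch is applied atomically; Sync makes the whole batch durable.
	batch := new(leveldb.Batch)
	batch.Put([]byte("a"), []byte("1"))
	batch.Put([]byte("b"), []byte("2"))
	batch.Delete([]byte("a"))
	if err := db.Write(batch, &opt.WriteOptions{Sync: true}); err != nil {
		log.Fatal(err)
	}
}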
+func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error { + return db.putRec(keyTypeDel, key, nil, wo) +} + +func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool { + iter := mem.NewIterator(nil) + defer iter.Release() + return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) && + (min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0)) +} + +// CompactRange compacts the underlying DB for the given key range. +// In particular, deleted and overwritten versions are discarded, +// and the data is rearranged to reduce the cost of operations +// needed to access the data. This operation should typically only +// be invoked by users who understand the underlying implementation. +// +// A nil Range.Start is treated as a key before all keys in the DB. +// And a nil Range.Limit is treated as a key after all keys in the DB. +// Therefore if both is nil then it will compact entire DB. +func (db *DB) CompactRange(r util.Range) error { + if err := db.ok(); err != nil { + return err + } + + // Lock writer. + select { + case db.writeLockC <- struct{}{}: + case err := <-db.compPerErrC: + return err + case <-db.closeC: + return ErrClosed + } + + // Check for overlaps in memdb. + mdb := db.getEffectiveMem() + if mdb == nil { + return ErrClosed + } + defer mdb.decref() + if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) { + // Memdb compaction. + if _, err := db.rotateMem(0, false); err != nil { + <-db.writeLockC + return err + } + <-db.writeLockC + if err := db.compTriggerWait(db.mcompCmdC); err != nil { + return err + } + } else { + <-db.writeLockC + } + + // Table compaction. + return db.compTriggerRange(db.tcompCmdC, -1, r.Start, r.Limit) +} + +// SetReadOnly makes DB read-only. It will stay read-only until reopened. +func (db *DB) SetReadOnly() error { + if err := db.ok(); err != nil { + return err + } + + // Lock writer. + select { + case db.writeLockC <- struct{}{}: + db.compWriteLocking = true + case err := <-db.compPerErrC: + return err + case <-db.closeC: + return ErrClosed + } + + // Set compaction read-only. + select { + case db.compErrSetC <- ErrReadOnly: + case perr := <-db.compPerErrC: + return perr + case <-db.closeC: + return ErrClosed + } + + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/doc.go b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go new file mode 100644 index 00000000000..be768e5739f --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go @@ -0,0 +1,92 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package leveldb provides implementation of LevelDB key/value database. +// +// Create or open a database: +// +// // The returned DB instance is safe for concurrent use. Which mean that all +// // DB's methods may be called concurrently from multiple goroutine. +// db, err := leveldb.OpenFile("path/to/db", nil) +// ... +// defer db.Close() +// ... +// +// Read or modify the database content: +// +// // Remember that the contents of the returned slice should not be modified. +// data, err := db.Get([]byte("key"), nil) +// ... +// err = db.Put([]byte("key"), []byte("value"), nil) +// ... +// err = db.Delete([]byte("key"), nil) +// ... 
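+//
+// A missing key is reported as ErrNotFound; an illustrative way to tell it
+// apart from other failures:
+//
+//	data, err := db.Get([]byte("key"), nil)
+//	if err == leveldb.ErrNotFound {
+//		// the key does not exist
+//	} else if err != nil {
+//		// some other error (I/O failure, corruption, closed DB, ...)
+//	}
+//	_ = data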
+// +// Iterate over database content: +// +// iter := db.NewIterator(nil, nil) +// for iter.Next() { +// // Remember that the contents of the returned slice should not be modified, and +// // only valid until the next call to Next. +// key := iter.Key() +// value := iter.Value() +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Iterate over subset of database content with a particular prefix: +// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil) +// for iter.Next() { +// // Use key/value. +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Seek-then-Iterate: +// +// iter := db.NewIterator(nil, nil) +// for ok := iter.Seek(key); ok; ok = iter.Next() { +// // Use key/value. +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Iterate over subset of database content: +// +// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil) +// for iter.Next() { +// // Use key/value. +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Batch writes: +// +// batch := new(leveldb.Batch) +// batch.Put([]byte("foo"), []byte("value")) +// batch.Put([]byte("bar"), []byte("another value")) +// batch.Delete([]byte("baz")) +// err = db.Write(batch, nil) +// ... +// +// Use bloom filter: +// +// o := &opt.Options{ +// Filter: filter.NewBloomFilter(10), +// } +// db, err := leveldb.OpenFile("path/to/db", o) +// ... +// defer db.Close() +// ... +package leveldb diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go new file mode 100644 index 00000000000..de2649812c2 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go @@ -0,0 +1,20 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/errors" +) + +// Common errors. +var ( + ErrNotFound = errors.ErrNotFound + ErrReadOnly = errors.New("leveldb: read-only mode") + ErrSnapshotReleased = errors.New("leveldb: snapshot released") + ErrIterReleased = errors.New("leveldb: iterator released") + ErrClosed = errors.New("leveldb: closed") +) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go new file mode 100644 index 00000000000..8d6146b6f5c --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go @@ -0,0 +1,78 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package errors provides common error types used throughout leveldb. +package errors + +import ( + "errors" + "fmt" + + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// Common errors. +var ( + ErrNotFound = New("leveldb: not found") + ErrReleased = util.ErrReleased + ErrHasReleaser = util.ErrHasReleaser +) + +// New returns an error that formats as the given text. +func New(text string) error { + return errors.New(text) +} + +// ErrCorrupted is the type that wraps errors that indicate corruption in +// the database. 
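+//
+// Prefer the IsCorrupted helper below to a direct type assertion when testing
+// for corruption, since storage-level corruption uses a separate
+// *storage.ErrCorrupted type that IsCorrupted also recognizes.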
+type ErrCorrupted struct { + Fd storage.FileDesc + Err error +} + +func (e *ErrCorrupted) Error() string { + if !e.Fd.Zero() { + return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd) + } + return e.Err.Error() +} + +// NewErrCorrupted creates new ErrCorrupted error. +func NewErrCorrupted(fd storage.FileDesc, err error) error { + return &ErrCorrupted{fd, err} +} + +// IsCorrupted returns a boolean indicating whether the error is indicating +// a corruption. +func IsCorrupted(err error) bool { + switch err.(type) { + case *ErrCorrupted: + return true + case *storage.ErrCorrupted: + return true + } + return false +} + +// ErrMissingFiles is the type that indicating a corruption due to missing +// files. ErrMissingFiles always wrapped with ErrCorrupted. +type ErrMissingFiles struct { + Fds []storage.FileDesc +} + +func (e *ErrMissingFiles) Error() string { return "file missing" } + +// SetFd sets 'file info' of the given error with the given file. +// Currently only ErrCorrupted is supported, otherwise will do nothing. +func SetFd(err error, fd storage.FileDesc) error { + switch x := err.(type) { + case *ErrCorrupted: + x.Fd = fd + return x + } + return err +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go new file mode 100644 index 00000000000..e961e420d3c --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go @@ -0,0 +1,31 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/filter" +) + +type iFilter struct { + filter.Filter +} + +func (f iFilter) Contains(filter, key []byte) bool { + return f.Filter.Contains(filter, internalKey(key).ukey()) +} + +func (f iFilter) NewGenerator() filter.FilterGenerator { + return iFilterGenerator{f.Filter.NewGenerator()} +} + +type iFilterGenerator struct { + filter.FilterGenerator +} + +func (g iFilterGenerator) Add(key []byte) { + g.FilterGenerator.Add(internalKey(key).ukey()) +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go new file mode 100644 index 00000000000..bab0e99705f --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go @@ -0,0 +1,116 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package filter + +import ( + "github.com/syndtr/goleveldb/leveldb/util" +) + +func bloomHash(key []byte) uint32 { + return util.Hash(key, 0xbc9f1d34) +} + +type bloomFilter int + +// The bloom filter serializes its parameters and is backward compatible +// with respect to them. Therefor, its parameters are not added to its +// name. +func (bloomFilter) Name() string { + return "leveldb.BuiltinBloomFilter" +} + +func (f bloomFilter) Contains(filter, key []byte) bool { + nBytes := len(filter) - 1 + if nBytes < 1 { + return false + } + nBits := uint32(nBytes * 8) + + // Use the encoded k so that we can read filters generated by + // bloom filters created using different parameters. + k := filter[nBytes] + if k > 30 { + // Reserved for potentially new encodings for short bloom filters. + // Consider it a match. 
+ return true + } + + kh := bloomHash(key) + delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits + for j := uint8(0); j < k; j++ { + bitpos := kh % nBits + if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 { + return false + } + kh += delta + } + return true +} + +func (f bloomFilter) NewGenerator() FilterGenerator { + // Round down to reduce probing cost a little bit. + k := uint8(f * 69 / 100) // 0.69 =~ ln(2) + if k < 1 { + k = 1 + } else if k > 30 { + k = 30 + } + return &bloomFilterGenerator{ + n: int(f), + k: k, + } +} + +type bloomFilterGenerator struct { + n int + k uint8 + + keyHashes []uint32 +} + +func (g *bloomFilterGenerator) Add(key []byte) { + // Use double-hashing to generate a sequence of hash values. + // See analysis in [Kirsch,Mitzenmacher 2006]. + g.keyHashes = append(g.keyHashes, bloomHash(key)) +} + +func (g *bloomFilterGenerator) Generate(b Buffer) { + // Compute bloom filter size (in both bits and bytes) + nBits := uint32(len(g.keyHashes) * g.n) + // For small n, we can see a very high false positive rate. Fix it + // by enforcing a minimum bloom filter length. + if nBits < 64 { + nBits = 64 + } + nBytes := (nBits + 7) / 8 + nBits = nBytes * 8 + + dest := b.Alloc(int(nBytes) + 1) + dest[nBytes] = g.k + for _, kh := range g.keyHashes { + delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits + for j := uint8(0); j < g.k; j++ { + bitpos := kh % nBits + dest[bitpos/8] |= (1 << (bitpos % 8)) + kh += delta + } + } + + g.keyHashes = g.keyHashes[:0] +} + +// NewBloomFilter creates a new initialized bloom filter for given +// bitsPerKey. +// +// Since bitsPerKey is persisted individually for each bloom filter +// serialization, bloom filters are backwards compatible with respect to +// changing bitsPerKey. This means that no big performance penalty will +// be experienced when changing the parameter. See documentation for +// opt.Options.Filter for more information. +func NewBloomFilter(bitsPerKey int) Filter { + return bloomFilter(bitsPerKey) +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go new file mode 100644 index 00000000000..7a925c5a869 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go @@ -0,0 +1,60 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package filter provides interface and implementation of probabilistic +// data structure. +// +// The filter is resposible for creating small filter from a set of keys. +// These filter will then used to test whether a key is a member of the set. +// In many cases, a filter can cut down the number of disk seeks from a +// handful to a single disk seek per DB.Get call. +package filter + +// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods. +type Buffer interface { + // Alloc allocs n bytes of slice from the buffer. This also advancing + // write offset. + Alloc(n int) []byte + + // Write appends the contents of p to the buffer. + Write(p []byte) (n int, err error) + + // WriteByte appends the byte c to the buffer. + WriteByte(c byte) error +} + +// Filter is the filter. +type Filter interface { + // Name returns the name of this policy. + // + // Note that if the filter encoding changes in an incompatible way, + // the name returned by this method must be changed. 
Otherwise, old + // incompatible filters may be passed to methods of this type. + Name() string + + // NewGenerator creates a new filter generator. + NewGenerator() FilterGenerator + + // Contains returns true if the filter contains the given key. + // + // The filter are filters generated by the filter generator. + Contains(filter, key []byte) bool +} + +// FilterGenerator is the filter generator. +type FilterGenerator interface { + // Add adds a key to the filter generator. + // + // The key may become invalid after call to this method end, therefor + // key must be copied if implementation require keeping key for later + // use. The key should not modified directly, doing so may cause + // undefined results. + Add(key []byte) + + // Generate generates filters based on keys passed so far. After call + // to Generate the filter generator maybe resetted, depends on implementation. + Generate(b Buffer) +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go new file mode 100644 index 00000000000..a23ab05f70f --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go @@ -0,0 +1,184 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package iterator + +import ( + "github.com/syndtr/goleveldb/leveldb/util" +) + +// BasicArray is the interface that wraps basic Len and Search method. +type BasicArray interface { + // Len returns length of the array. + Len() int + + // Search finds smallest index that point to a key that is greater + // than or equal to the given key. + Search(key []byte) int +} + +// Array is the interface that wraps BasicArray and basic Index method. +type Array interface { + BasicArray + + // Index returns key/value pair with index of i. + Index(i int) (key, value []byte) +} + +// Array is the interface that wraps BasicArray and basic Get method. +type ArrayIndexer interface { + BasicArray + + // Get returns a new data iterator with index of i. 
+ Get(i int) Iterator +} + +type basicArrayIterator struct { + util.BasicReleaser + array BasicArray + pos int + err error +} + +func (i *basicArrayIterator) Valid() bool { + return i.pos >= 0 && i.pos < i.array.Len() && !i.Released() +} + +func (i *basicArrayIterator) First() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + if i.array.Len() == 0 { + i.pos = -1 + return false + } + i.pos = 0 + return true +} + +func (i *basicArrayIterator) Last() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + n := i.array.Len() + if n == 0 { + i.pos = 0 + return false + } + i.pos = n - 1 + return true +} + +func (i *basicArrayIterator) Seek(key []byte) bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + n := i.array.Len() + if n == 0 { + i.pos = 0 + return false + } + i.pos = i.array.Search(key) + if i.pos >= n { + return false + } + return true +} + +func (i *basicArrayIterator) Next() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.pos++ + if n := i.array.Len(); i.pos >= n { + i.pos = n + return false + } + return true +} + +func (i *basicArrayIterator) Prev() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.pos-- + if i.pos < 0 { + i.pos = -1 + return false + } + return true +} + +func (i *basicArrayIterator) Error() error { return i.err } + +type arrayIterator struct { + basicArrayIterator + array Array + pos int + key, value []byte +} + +func (i *arrayIterator) updateKV() { + if i.pos == i.basicArrayIterator.pos { + return + } + i.pos = i.basicArrayIterator.pos + if i.Valid() { + i.key, i.value = i.array.Index(i.pos) + } else { + i.key = nil + i.value = nil + } +} + +func (i *arrayIterator) Key() []byte { + i.updateKV() + return i.key +} + +func (i *arrayIterator) Value() []byte { + i.updateKV() + return i.value +} + +type arrayIteratorIndexer struct { + basicArrayIterator + array ArrayIndexer +} + +func (i *arrayIteratorIndexer) Get() Iterator { + if i.Valid() { + return i.array.Get(i.basicArrayIterator.pos) + } + return nil +} + +// NewArrayIterator returns an iterator from the given array. +func NewArrayIterator(array Array) Iterator { + return &arrayIterator{ + basicArrayIterator: basicArrayIterator{array: array, pos: -1}, + array: array, + pos: -1, + } +} + +// NewArrayIndexer returns an index iterator from the given array. +func NewArrayIndexer(array ArrayIndexer) IteratorIndexer { + return &arrayIteratorIndexer{ + basicArrayIterator: basicArrayIterator{array: array, pos: -1}, + array: array, + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go new file mode 100644 index 00000000000..939adbb9332 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go @@ -0,0 +1,242 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package iterator + +import ( + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// IteratorIndexer is the interface that wraps CommonIterator and basic Get +// method. IteratorIndexer provides index for indexed iterator. +type IteratorIndexer interface { + CommonIterator + + // Get returns a new data iterator for the current position, or nil if + // done. 
+ Get() Iterator +} + +type indexedIterator struct { + util.BasicReleaser + index IteratorIndexer + strict bool + + data Iterator + err error + errf func(err error) + closed bool +} + +func (i *indexedIterator) setData() { + if i.data != nil { + i.data.Release() + } + i.data = i.index.Get() +} + +func (i *indexedIterator) clearData() { + if i.data != nil { + i.data.Release() + } + i.data = nil +} + +func (i *indexedIterator) indexErr() { + if err := i.index.Error(); err != nil { + if i.errf != nil { + i.errf(err) + } + i.err = err + } +} + +func (i *indexedIterator) dataErr() bool { + if err := i.data.Error(); err != nil { + if i.errf != nil { + i.errf(err) + } + if i.strict || !errors.IsCorrupted(err) { + i.err = err + return true + } + } + return false +} + +func (i *indexedIterator) Valid() bool { + return i.data != nil && i.data.Valid() +} + +func (i *indexedIterator) First() bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + if !i.index.First() { + i.indexErr() + i.clearData() + return false + } + i.setData() + return i.Next() +} + +func (i *indexedIterator) Last() bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + if !i.index.Last() { + i.indexErr() + i.clearData() + return false + } + i.setData() + if !i.data.Last() { + if i.dataErr() { + return false + } + i.clearData() + return i.Prev() + } + return true +} + +func (i *indexedIterator) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + if !i.index.Seek(key) { + i.indexErr() + i.clearData() + return false + } + i.setData() + if !i.data.Seek(key) { + if i.dataErr() { + return false + } + i.clearData() + return i.Next() + } + return true +} + +func (i *indexedIterator) Next() bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + switch { + case i.data != nil && !i.data.Next(): + if i.dataErr() { + return false + } + i.clearData() + fallthrough + case i.data == nil: + if !i.index.Next() { + i.indexErr() + return false + } + i.setData() + return i.Next() + } + return true +} + +func (i *indexedIterator) Prev() bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + switch { + case i.data != nil && !i.data.Prev(): + if i.dataErr() { + return false + } + i.clearData() + fallthrough + case i.data == nil: + if !i.index.Prev() { + i.indexErr() + return false + } + i.setData() + if !i.data.Last() { + if i.dataErr() { + return false + } + i.clearData() + return i.Prev() + } + } + return true +} + +func (i *indexedIterator) Key() []byte { + if i.data == nil { + return nil + } + return i.data.Key() +} + +func (i *indexedIterator) Value() []byte { + if i.data == nil { + return nil + } + return i.data.Value() +} + +func (i *indexedIterator) Release() { + i.clearData() + i.index.Release() + i.BasicReleaser.Release() +} + +func (i *indexedIterator) Error() error { + if i.err != nil { + return i.err + } + if err := i.index.Error(); err != nil { + return err + } + return nil +} + +func (i *indexedIterator) SetErrorCallback(f func(err error)) { + i.errf = f +} + +// NewIndexedIterator returns an 'indexed iterator'. An index is iterator +// that returns another iterator, a 'data iterator'. A 'data iterator' is the +// iterator that contains actual key/value pairs. 
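+//
+// The typical use in this library is a table's index block as the index, with
+// each data iterator reading the key/value pairs of one data block.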
+// +// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) +// won't be ignored and will halt 'indexed iterator', otherwise the iterator will +// continue to the next 'data iterator'. Corruption on 'index iterator' will not be +// ignored and will halt the iterator. +func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator { + return &indexedIterator{index: index, strict: strict} +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go new file mode 100644 index 00000000000..b16e3a70452 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go @@ -0,0 +1,132 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package iterator provides interface and implementation to traverse over +// contents of a database. +package iterator + +import ( + "errors" + + "github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + ErrIterReleased = errors.New("leveldb/iterator: iterator released") +) + +// IteratorSeeker is the interface that wraps the 'seeks method'. +type IteratorSeeker interface { + // First moves the iterator to the first key/value pair. If the iterator + // only contains one key/value pair then First and Last would moves + // to the same key/value pair. + // It returns whether such pair exist. + First() bool + + // Last moves the iterator to the last key/value pair. If the iterator + // only contains one key/value pair then First and Last would moves + // to the same key/value pair. + // It returns whether such pair exist. + Last() bool + + // Seek moves the iterator to the first key/value pair whose key is greater + // than or equal to the given key. + // It returns whether such pair exist. + // + // It is safe to modify the contents of the argument after Seek returns. + Seek(key []byte) bool + + // Next moves the iterator to the next key/value pair. + // It returns whether the iterator is exhausted. + Next() bool + + // Prev moves the iterator to the previous key/value pair. + // It returns whether the iterator is exhausted. + Prev() bool +} + +// CommonIterator is the interface that wraps common iterator methods. +type CommonIterator interface { + IteratorSeeker + + // util.Releaser is the interface that wraps basic Release method. + // When called Release will releases any resources associated with the + // iterator. + util.Releaser + + // util.ReleaseSetter is the interface that wraps the basic SetReleaser + // method. + util.ReleaseSetter + + // TODO: Remove this when ready. + Valid() bool + + // Error returns any accumulated error. Exhausting all the key/value pairs + // is not considered to be an error. + Error() error +} + +// Iterator iterates over a DB's key/value pairs in key order. +// +// When encounter an error any 'seeks method' will return false and will +// yield no key/value pairs. The error can be queried by calling the Error +// method. Calling Release is still necessary. +// +// An iterator must be released after use, but it is not necessary to read +// an iterator until exhaustion. +// Also, an iterator is not necessarily safe for concurrent use, but it is +// safe to use multiple iterators concurrently, with each in a dedicated +// goroutine. +type Iterator interface { + CommonIterator + + // Key returns the key of the current key/value pair, or nil if done. 
+ // The caller should not modify the contents of the returned slice, and + // its contents may change on the next call to any 'seeks method'. + Key() []byte + + // Value returns the value of the current key/value pair, or nil if done. + // The caller should not modify the contents of the returned slice, and + // its contents may change on the next call to any 'seeks method'. + Value() []byte +} + +// ErrorCallbackSetter is the interface that wraps basic SetErrorCallback +// method. +// +// ErrorCallbackSetter implemented by indexed and merged iterator. +type ErrorCallbackSetter interface { + // SetErrorCallback allows set an error callback of the corresponding + // iterator. Use nil to clear the callback. + SetErrorCallback(f func(err error)) +} + +type emptyIterator struct { + util.BasicReleaser + err error +} + +func (i *emptyIterator) rErr() { + if i.err == nil && i.Released() { + i.err = ErrIterReleased + } +} + +func (*emptyIterator) Valid() bool { return false } +func (i *emptyIterator) First() bool { i.rErr(); return false } +func (i *emptyIterator) Last() bool { i.rErr(); return false } +func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false } +func (i *emptyIterator) Next() bool { i.rErr(); return false } +func (i *emptyIterator) Prev() bool { i.rErr(); return false } +func (*emptyIterator) Key() []byte { return nil } +func (*emptyIterator) Value() []byte { return nil } +func (i *emptyIterator) Error() error { return i.err } + +// NewEmptyIterator creates an empty iterator. The err parameter can be +// nil, but if not nil the given err will be returned by Error method. +func NewEmptyIterator(err error) Iterator { + return &emptyIterator{err: err} +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go new file mode 100644 index 00000000000..1a7e29df8fb --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go @@ -0,0 +1,304 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package iterator + +import ( + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/util" +) + +type dir int + +const ( + dirReleased dir = iota - 1 + dirSOI + dirEOI + dirBackward + dirForward +) + +type mergedIterator struct { + cmp comparer.Comparer + iters []Iterator + strict bool + + keys [][]byte + index int + dir dir + err error + errf func(err error) + releaser util.Releaser +} + +func assertKey(key []byte) []byte { + if key == nil { + panic("leveldb/iterator: nil key") + } + return key +} + +func (i *mergedIterator) iterErr(iter Iterator) bool { + if err := iter.Error(); err != nil { + if i.errf != nil { + i.errf(err) + } + if i.strict || !errors.IsCorrupted(err) { + i.err = err + return true + } + } + return false +} + +func (i *mergedIterator) Valid() bool { + return i.err == nil && i.dir > dirEOI +} + +func (i *mergedIterator) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.First(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirSOI + return i.next() +} + +func (i *mergedIterator) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.Last(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirEOI + return i.prev() +} + +func (i *mergedIterator) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.Seek(key): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirSOI + return i.next() +} + +func (i *mergedIterator) next() bool { + var key []byte + if i.dir == dirForward { + key = i.keys[i.index] + } + for x, tkey := range i.keys { + if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) { + key = tkey + i.index = x + } + } + if key == nil { + i.dir = dirEOI + return false + } + i.dir = dirForward + return true +} + +func (i *mergedIterator) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirSOI: + return i.First() + case dirBackward: + key := append([]byte{}, i.keys[i.index]...) 
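+ // Direction change (backward -> forward): re-seek every input iterator
+ // to the current key, then step past it with the Next call below.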
+ if !i.Seek(key) { + return false + } + return i.Next() + } + + x := i.index + iter := i.iters[x] + switch { + case iter.Next(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + return i.next() +} + +func (i *mergedIterator) prev() bool { + var key []byte + if i.dir == dirBackward { + key = i.keys[i.index] + } + for x, tkey := range i.keys { + if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) { + key = tkey + i.index = x + } + } + if key == nil { + i.dir = dirSOI + return false + } + i.dir = dirBackward + return true +} + +func (i *mergedIterator) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirEOI: + return i.Last() + case dirForward: + key := append([]byte{}, i.keys[i.index]...) + for x, iter := range i.iters { + if x == i.index { + continue + } + seek := iter.Seek(key) + switch { + case seek && iter.Prev(), !seek && iter.Last(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + } + + x := i.index + iter := i.iters[x] + switch { + case iter.Prev(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + return i.prev() +} + +func (i *mergedIterator) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.keys[i.index] +} + +func (i *mergedIterator) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.iters[i.index].Value() +} + +func (i *mergedIterator) Release() { + if i.dir != dirReleased { + i.dir = dirReleased + for _, iter := range i.iters { + iter.Release() + } + i.iters = nil + i.keys = nil + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + } +} + +func (i *mergedIterator) SetReleaser(releaser util.Releaser) { + if i.dir == dirReleased { + panic(util.ErrReleased) + } + if i.releaser != nil && releaser != nil { + panic(util.ErrHasReleaser) + } + i.releaser = releaser +} + +func (i *mergedIterator) Error() error { + return i.err +} + +func (i *mergedIterator) SetErrorCallback(f func(err error)) { + i.errf = f +} + +// NewMergedIterator returns an iterator that merges its input. Walking the +// resultant iterator will return all key/value pairs of all input iterators +// in strictly increasing key order, as defined by cmp. +// The input's key ranges may overlap, but there are assumed to be no duplicate +// keys: if iters[i] contains a key k then iters[j] will not contain that key k. +// None of the iters may be nil. +// +// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) +// won't be ignored and will halt 'merged iterator', otherwise the iterator will +// continue to the next 'input iterator'. +func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator { + return &mergedIterator{ + iters: iters, + cmp: cmp, + strict: strict, + keys: make([][]byte, len(iters)), + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go new file mode 100644 index 00000000000..d094c3d0f8a --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go @@ -0,0 +1,524 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0 +// License, authors and contributors informations can be found at bellow URLs respectively: +// https://code.google.com/p/leveldb-go/source/browse/LICENSE +// https://code.google.com/p/leveldb-go/source/browse/AUTHORS +// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS + +// Package journal reads and writes sequences of journals. Each journal is a stream +// of bytes that completes before the next journal starts. +// +// When reading, call Next to obtain an io.Reader for the next journal. Next will +// return io.EOF when there are no more journals. It is valid to call Next +// without reading the current journal to exhaustion. +// +// When writing, call Next to obtain an io.Writer for the next journal. Calling +// Next finishes the current journal. Call Close to finish the final journal. +// +// Optionally, call Flush to finish the current journal and flush the underlying +// writer without starting a new journal. To start a new journal after flushing, +// call Next. +// +// Neither Readers or Writers are safe to use concurrently. +// +// Example code: +// func read(r io.Reader) ([]string, error) { +// var ss []string +// journals := journal.NewReader(r, nil, true, true) +// for { +// j, err := journals.Next() +// if err == io.EOF { +// break +// } +// if err != nil { +// return nil, err +// } +// s, err := ioutil.ReadAll(j) +// if err != nil { +// return nil, err +// } +// ss = append(ss, string(s)) +// } +// return ss, nil +// } +// +// func write(w io.Writer, ss []string) error { +// journals := journal.NewWriter(w) +// for _, s := range ss { +// j, err := journals.Next() +// if err != nil { +// return err +// } +// if _, err := j.Write([]byte(s)), err != nil { +// return err +// } +// } +// return journals.Close() +// } +// +// The wire format is that the stream is divided into 32KiB blocks, and each +// block contains a number of tightly packed chunks. Chunks cannot cross block +// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a +// block must be zero. +// +// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4 +// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type) +// followed by a payload. The checksum is over the chunk type and the payload. +// +// There are four chunk types: whether the chunk is the full journal, or the +// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal +// has one first chunk, zero or more middle chunks, and one last chunk. +// +// The wire format allows for limited recovery in the face of data corruption: +// on a format error (such as a checksum mismatch), the reader moves to the +// next block and looks for the next full or first chunk. +package journal + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// These constants are part of the wire format and should not be changed. +const ( + fullChunkType = 1 + firstChunkType = 2 + middleChunkType = 3 + lastChunkType = 4 +) + +const ( + blockSize = 32 * 1024 + headerSize = 7 +) + +type flusher interface { + Flush() error +} + +// ErrCorrupted is the error type that generated by corrupted block or chunk. 
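+// When a Dropper is installed it receives these errors; in strict mode most
+// corruptions are additionally wrapped via errors.NewErrCorrupted and halt
+// the reader (see the corrupt method below).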
+type ErrCorrupted struct { + Size int + Reason string +} + +func (e *ErrCorrupted) Error() string { + return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size) +} + +// Dropper is the interface that wrap simple Drop method. The Drop +// method will be called when the journal reader dropping a block or chunk. +type Dropper interface { + Drop(err error) +} + +// Reader reads journals from an underlying io.Reader. +type Reader struct { + // r is the underlying reader. + r io.Reader + // the dropper. + dropper Dropper + // strict flag. + strict bool + // checksum flag. + checksum bool + // seq is the sequence number of the current journal. + seq int + // buf[i:j] is the unread portion of the current chunk's payload. + // The low bound, i, excludes the chunk header. + i, j int + // n is the number of bytes of buf that are valid. Once reading has started, + // only the final block can have n < blockSize. + n int + // last is whether the current chunk is the last chunk of the journal. + last bool + // err is any accumulated error. + err error + // buf is the buffer. + buf [blockSize]byte +} + +// NewReader returns a new reader. The dropper may be nil, and if +// strict is true then corrupted or invalid chunk will halt the journal +// reader entirely. +func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader { + return &Reader{ + r: r, + dropper: dropper, + strict: strict, + checksum: checksum, + last: true, + } +} + +var errSkip = errors.New("leveldb/journal: skipped") + +func (r *Reader) corrupt(n int, reason string, skip bool) error { + if r.dropper != nil { + r.dropper.Drop(&ErrCorrupted{n, reason}) + } + if r.strict && !skip { + r.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrCorrupted{n, reason}) + return r.err + } + return errSkip +} + +// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the +// next block into the buffer if necessary. +func (r *Reader) nextChunk(first bool) error { + for { + if r.j+headerSize <= r.n { + checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4]) + length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6]) + chunkType := r.buf[r.j+6] + unprocBlock := r.n - r.j + if checksum == 0 && length == 0 && chunkType == 0 { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(unprocBlock, "zero header", false) + } + if chunkType < fullChunkType || chunkType > lastChunkType { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(unprocBlock, fmt.Sprintf("invalid chunk type %#x", chunkType), false) + } + r.i = r.j + headerSize + r.j = r.j + headerSize + int(length) + if r.j > r.n { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(unprocBlock, "chunk length overflows block", false) + } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(unprocBlock, "checksum mismatch", false) + } + if first && chunkType != fullChunkType && chunkType != firstChunkType { + chunkLength := (r.j - r.i) + headerSize + r.i = r.j + // Report the error, but skip it. + return r.corrupt(chunkLength, "orphan chunk", true) + } + r.last = chunkType == fullChunkType || chunkType == lastChunkType + return nil + } + + // The last block. + if r.n < blockSize && r.n > 0 { + if !first { + return r.corrupt(0, "missing chunk part", false) + } + r.err = io.EOF + return r.err + } + + // Read block. 
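+ // A short read is tolerated here (io.EOF and io.ErrUnexpectedEOF are
+ // ignored): only the final block of a journal file may be shorter than
+ // blockSize.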
+ n, err := io.ReadFull(r.r, r.buf[:]) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return err + } + if n == 0 { + if !first { + return r.corrupt(0, "missing chunk part", false) + } + r.err = io.EOF + return r.err + } + r.i, r.j, r.n = 0, 0, n + } +} + +// Next returns a reader for the next journal. It returns io.EOF if there are no +// more journals. The reader returned becomes stale after the next Next call, +// and should no longer be used. If strict is false, the reader will returns +// io.ErrUnexpectedEOF error when found corrupted journal. +func (r *Reader) Next() (io.Reader, error) { + r.seq++ + if r.err != nil { + return nil, r.err + } + r.i = r.j + for { + if err := r.nextChunk(true); err == nil { + break + } else if err != errSkip { + return nil, err + } + } + return &singleReader{r, r.seq, nil}, nil +} + +// Reset resets the journal reader, allows reuse of the journal reader. Reset returns +// last accumulated error. +func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error { + r.seq++ + err := r.err + r.r = reader + r.dropper = dropper + r.strict = strict + r.checksum = checksum + r.i = 0 + r.j = 0 + r.n = 0 + r.last = true + r.err = nil + return err +} + +type singleReader struct { + r *Reader + seq int + err error +} + +func (x *singleReader) Read(p []byte) (int, error) { + r := x.r + if r.seq != x.seq { + return 0, errors.New("leveldb/journal: stale reader") + } + if x.err != nil { + return 0, x.err + } + if r.err != nil { + return 0, r.err + } + for r.i == r.j { + if r.last { + return 0, io.EOF + } + x.err = r.nextChunk(false) + if x.err != nil { + if x.err == errSkip { + x.err = io.ErrUnexpectedEOF + } + return 0, x.err + } + } + n := copy(p, r.buf[r.i:r.j]) + r.i += n + return n, nil +} + +func (x *singleReader) ReadByte() (byte, error) { + r := x.r + if r.seq != x.seq { + return 0, errors.New("leveldb/journal: stale reader") + } + if x.err != nil { + return 0, x.err + } + if r.err != nil { + return 0, r.err + } + for r.i == r.j { + if r.last { + return 0, io.EOF + } + x.err = r.nextChunk(false) + if x.err != nil { + if x.err == errSkip { + x.err = io.ErrUnexpectedEOF + } + return 0, x.err + } + } + c := r.buf[r.i] + r.i++ + return c, nil +} + +// Writer writes journals to an underlying io.Writer. +type Writer struct { + // w is the underlying writer. + w io.Writer + // seq is the sequence number of the current journal. + seq int + // f is w as a flusher. + f flusher + // buf[i:j] is the bytes that will become the current chunk. + // The low bound, i, includes the chunk header. + i, j int + // buf[:written] has already been written to w. + // written is zero unless Flush has been called. + written int + // first is whether the current chunk is the first chunk of the journal. + first bool + // pending is whether a chunk is buffered but not yet written. + pending bool + // err is any accumulated error. + err error + // buf is the buffer. + buf [blockSize]byte +} + +// NewWriter returns a new Writer. +func NewWriter(w io.Writer) *Writer { + f, _ := w.(flusher) + return &Writer{ + w: w, + f: f, + } +} + +// fillHeader fills in the header for the pending chunk. 
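+// The 7-byte header is laid out as: bytes 0-3 checksum (computed over the
+// chunk type and payload), bytes 4-5 little-endian payload length, and
+// byte 6 the chunk type.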
+func (w *Writer) fillHeader(last bool) { + if w.i+headerSize > w.j || w.j > blockSize { + panic("leveldb/journal: bad writer state") + } + if last { + if w.first { + w.buf[w.i+6] = fullChunkType + } else { + w.buf[w.i+6] = lastChunkType + } + } else { + if w.first { + w.buf[w.i+6] = firstChunkType + } else { + w.buf[w.i+6] = middleChunkType + } + } + binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value()) + binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize)) +} + +// writeBlock writes the buffered block to the underlying writer, and reserves +// space for the next chunk's header. +func (w *Writer) writeBlock() { + _, w.err = w.w.Write(w.buf[w.written:]) + w.i = 0 + w.j = headerSize + w.written = 0 +} + +// writePending finishes the current journal and writes the buffer to the +// underlying writer. +func (w *Writer) writePending() { + if w.err != nil { + return + } + if w.pending { + w.fillHeader(true) + w.pending = false + } + _, w.err = w.w.Write(w.buf[w.written:w.j]) + w.written = w.j +} + +// Close finishes the current journal and closes the writer. +func (w *Writer) Close() error { + w.seq++ + w.writePending() + if w.err != nil { + return w.err + } + w.err = errors.New("leveldb/journal: closed Writer") + return nil +} + +// Flush finishes the current journal, writes to the underlying writer, and +// flushes it if that writer implements interface{ Flush() error }. +func (w *Writer) Flush() error { + w.seq++ + w.writePending() + if w.err != nil { + return w.err + } + if w.f != nil { + w.err = w.f.Flush() + return w.err + } + return nil +} + +// Reset resets the journal writer, allows reuse of the journal writer. Reset +// will also closes the journal writer if not already. +func (w *Writer) Reset(writer io.Writer) (err error) { + w.seq++ + if w.err == nil { + w.writePending() + err = w.err + } + w.w = writer + w.f, _ = writer.(flusher) + w.i = 0 + w.j = 0 + w.written = 0 + w.first = false + w.pending = false + w.err = nil + return +} + +// Next returns a writer for the next journal. The writer returned becomes stale +// after the next Close, Flush or Next call, and should no longer be used. +func (w *Writer) Next() (io.Writer, error) { + w.seq++ + if w.err != nil { + return nil, w.err + } + if w.pending { + w.fillHeader(true) + } + w.i = w.j + w.j = w.j + headerSize + // Check if there is room in the block for the header. + if w.j > blockSize { + // Fill in the rest of the block with zeroes. + for k := w.i; k < blockSize; k++ { + w.buf[k] = 0 + } + w.writeBlock() + if w.err != nil { + return nil, w.err + } + } + w.first = true + w.pending = true + return singleWriter{w, w.seq}, nil +} + +type singleWriter struct { + w *Writer + seq int +} + +func (x singleWriter) Write(p []byte) (int, error) { + w := x.w + if w.seq != x.seq { + return 0, errors.New("leveldb/journal: stale writer") + } + if w.err != nil { + return 0, w.err + } + n0 := len(p) + for len(p) > 0 { + // Write a block, if it is full. + if w.j == blockSize { + w.fillHeader(false) + w.writeBlock() + if w.err != nil { + return 0, w.err + } + w.first = false + } + // Copy bytes into the buffer. 
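+ // copy fills at most the remainder of the current block; any leftover
+ // bytes of p are handled on the next loop iteration, once the full block
+ // has been written out at the top of the loop.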
+ n := copy(w.buf[w.j:], p) + w.j += n + p = p[n:] + } + return n0, nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/key.go b/vendor/github.com/syndtr/goleveldb/leveldb/key.go new file mode 100644 index 00000000000..ad8f51ec85d --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/key.go @@ -0,0 +1,143 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "encoding/binary" + "fmt" + + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +// ErrInternalKeyCorrupted records internal key corruption. +type ErrInternalKeyCorrupted struct { + Ikey []byte + Reason string +} + +func (e *ErrInternalKeyCorrupted) Error() string { + return fmt.Sprintf("leveldb: internal key %q corrupted: %s", e.Ikey, e.Reason) +} + +func newErrInternalKeyCorrupted(ikey []byte, reason string) error { + return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason}) +} + +type keyType uint + +func (kt keyType) String() string { + switch kt { + case keyTypeDel: + return "d" + case keyTypeVal: + return "v" + } + return fmt.Sprintf("", uint(kt)) +} + +// Value types encoded as the last component of internal keys. +// Don't modify; this value are saved to disk. +const ( + keyTypeDel = keyType(0) + keyTypeVal = keyType(1) +) + +// keyTypeSeek defines the keyType that should be passed when constructing an +// internal key for seeking to a particular sequence number (since we +// sort sequence numbers in decreasing order and the value type is +// embedded as the low 8 bits in the sequence number in internal keys, +// we need to use the highest-numbered ValueType, not the lowest). +const keyTypeSeek = keyTypeVal + +const ( + // Maximum value possible for sequence number; the 8-bits are + // used by value type, so its can packed together in single + // 64-bit integer. + keyMaxSeq = (uint64(1) << 56) - 1 + // Maximum value possible for packed sequence number and type. + keyMaxNum = (keyMaxSeq << 8) | uint64(keyTypeSeek) +) + +// Maximum number encoded in bytes. 
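+// (keyMaxNum above, encoded in little-endian order. An internal key is the
+// user key followed by such an 8-byte suffix packing sequence<<8 | keyType;
+// see makeInternalKey and parseInternalKey below.)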
+var keyMaxNumBytes = make([]byte, 8) + +func init() { + binary.LittleEndian.PutUint64(keyMaxNumBytes, keyMaxNum) +} + +type internalKey []byte + +func makeInternalKey(dst, ukey []byte, seq uint64, kt keyType) internalKey { + if seq > keyMaxSeq { + panic("leveldb: invalid sequence number") + } else if kt > keyTypeVal { + panic("leveldb: invalid type") + } + + dst = ensureBuffer(dst, len(ukey)+8) + copy(dst, ukey) + binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt)) + return internalKey(dst) +} + +func parseInternalKey(ik []byte) (ukey []byte, seq uint64, kt keyType, err error) { + if len(ik) < 8 { + return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length") + } + num := binary.LittleEndian.Uint64(ik[len(ik)-8:]) + seq, kt = uint64(num>>8), keyType(num&0xff) + if kt > keyTypeVal { + return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type") + } + ukey = ik[:len(ik)-8] + return +} + +func validInternalKey(ik []byte) bool { + _, _, _, err := parseInternalKey(ik) + return err == nil +} + +func (ik internalKey) assert() { + if ik == nil { + panic("leveldb: nil internalKey") + } + if len(ik) < 8 { + panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid length", []byte(ik), len(ik))) + } +} + +func (ik internalKey) ukey() []byte { + ik.assert() + return ik[:len(ik)-8] +} + +func (ik internalKey) num() uint64 { + ik.assert() + return binary.LittleEndian.Uint64(ik[len(ik)-8:]) +} + +func (ik internalKey) parseNum() (seq uint64, kt keyType) { + num := ik.num() + seq, kt = uint64(num>>8), keyType(num&0xff) + if kt > keyTypeVal { + panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt)) + } + return +} + +func (ik internalKey) String() string { + if ik == nil { + return "" + } + + if ukey, seq, kt, err := parseInternalKey(ik); err == nil { + return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq) + } + return fmt.Sprintf("", []byte(ik)) +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go new file mode 100644 index 00000000000..b661c08a93e --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go @@ -0,0 +1,475 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package memdb provides in-memory key/value database implementation. +package memdb + +import ( + "math/rand" + "sync" + + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// Common errors. 
+var ( + ErrNotFound = errors.ErrNotFound + ErrIterReleased = errors.New("leveldb/memdb: iterator released") +) + +const tMaxHeight = 12 + +type dbIter struct { + util.BasicReleaser + p *DB + slice *util.Range + node int + forward bool + key, value []byte + err error +} + +func (i *dbIter) fill(checkStart, checkLimit bool) bool { + if i.node != 0 { + n := i.p.nodeData[i.node] + m := n + i.p.nodeData[i.node+nKey] + i.key = i.p.kvData[n:m] + if i.slice != nil { + switch { + case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0: + fallthrough + case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0: + i.node = 0 + goto bail + } + } + i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]] + return true + } +bail: + i.key = nil + i.value = nil + return false +} + +func (i *dbIter) Valid() bool { + return i.node != 0 +} + +func (i *dbIter) First() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Start != nil { + i.node, _ = i.p.findGE(i.slice.Start, false) + } else { + i.node = i.p.nodeData[nNext] + } + return i.fill(false, true) +} + +func (i *dbIter) Last() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.forward = false + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Limit != nil { + i.node = i.p.findLT(i.slice.Limit) + } else { + i.node = i.p.findLast() + } + return i.fill(true, false) +} + +func (i *dbIter) Seek(key []byte) bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 { + key = i.slice.Start + } + i.node, _ = i.p.findGE(key, false) + return i.fill(false, true) +} + +func (i *dbIter) Next() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + if i.node == 0 { + if !i.forward { + return i.First() + } + return false + } + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + i.node = i.p.nodeData[i.node+nNext] + return i.fill(false, true) +} + +func (i *dbIter) Prev() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + if i.node == 0 { + if i.forward { + return i.Last() + } + return false + } + i.forward = false + i.p.mu.RLock() + defer i.p.mu.RUnlock() + i.node = i.p.findLT(i.key) + return i.fill(true, false) +} + +func (i *dbIter) Key() []byte { + return i.key +} + +func (i *dbIter) Value() []byte { + return i.value +} + +func (i *dbIter) Error() error { return i.err } + +func (i *dbIter) Release() { + if !i.Released() { + i.p = nil + i.node = 0 + i.key = nil + i.value = nil + i.BasicReleaser.Release() + } +} + +const ( + nKV = iota + nKey + nVal + nHeight + nNext +) + +// DB is an in-memory key/value database. +type DB struct { + cmp comparer.BasicComparer + rnd *rand.Rand + + mu sync.RWMutex + kvData []byte + // Node data: + // [0] : KV offset + // [1] : Key length + // [2] : Value length + // [3] : Height + // [3..height] : Next nodes + nodeData []int + prevNode [tMaxHeight]int + maxHeight int + n int + kvSize int +} + +func (p *DB) randHeight() (h int) { + const branching = 4 + h = 1 + for h < tMaxHeight && p.rnd.Int()%branching == 0 { + h++ + } + return +} + +// Must hold RW-lock if prev == true, as it use shared prevNode slice. 
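+//
+// findGE walks the skip list from its highest level down and returns the
+// first node whose key is >= key, together with whether the match is exact.
+// With prev == true it also records the per-level predecessors in prevNode,
+// which Put and Delete then use to splice nodes in and out of the list.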
+func (p *DB) findGE(key []byte, prev bool) (int, bool) { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + cmp := 1 + if next != 0 { + o := p.nodeData[next] + cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) + } + if cmp < 0 { + // Keep searching in this list + node = next + } else { + if prev { + p.prevNode[h] = node + } else if cmp == 0 { + return next, true + } + if h == 0 { + return next, cmp == 0 + } + h-- + } + } +} + +func (p *DB) findLT(key []byte) int { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + o := p.nodeData[next] + if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 { + if h == 0 { + break + } + h-- + } else { + node = next + } + } + return node +} + +func (p *DB) findLast() int { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + if next == 0 { + if h == 0 { + break + } + h-- + } else { + node = next + } + } + return node +} + +// Put sets the value for the given key. It overwrites any previous value +// for that key; a DB is not a multi-map. +// +// It is safe to modify the contents of the arguments after Put returns. +func (p *DB) Put(key []byte, value []byte) error { + p.mu.Lock() + defer p.mu.Unlock() + + if node, exact := p.findGE(key, true); exact { + kvOffset := len(p.kvData) + p.kvData = append(p.kvData, key...) + p.kvData = append(p.kvData, value...) + p.nodeData[node] = kvOffset + m := p.nodeData[node+nVal] + p.nodeData[node+nVal] = len(value) + p.kvSize += len(value) - m + return nil + } + + h := p.randHeight() + if h > p.maxHeight { + for i := p.maxHeight; i < h; i++ { + p.prevNode[i] = 0 + } + p.maxHeight = h + } + + kvOffset := len(p.kvData) + p.kvData = append(p.kvData, key...) + p.kvData = append(p.kvData, value...) + // Node + node := len(p.nodeData) + p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h) + for i, n := range p.prevNode[:h] { + m := n + nNext + i + p.nodeData = append(p.nodeData, p.nodeData[m]) + p.nodeData[m] = node + } + + p.kvSize += len(key) + len(value) + p.n++ + return nil +} + +// Delete deletes the value for the given key. It returns ErrNotFound if +// the DB does not contain the key. +// +// It is safe to modify the contents of the arguments after Delete returns. +func (p *DB) Delete(key []byte) error { + p.mu.Lock() + defer p.mu.Unlock() + + node, exact := p.findGE(key, true) + if !exact { + return ErrNotFound + } + + h := p.nodeData[node+nHeight] + for i, n := range p.prevNode[:h] { + m := n + nNext + i + p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i] + } + + p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal] + p.n-- + return nil +} + +// Contains returns true if the given key are in the DB. +// +// It is safe to modify the contents of the arguments after Contains returns. +func (p *DB) Contains(key []byte) bool { + p.mu.RLock() + _, exact := p.findGE(key, false) + p.mu.RUnlock() + return exact +} + +// Get gets the value for the given key. It returns error.ErrNotFound if the +// DB does not contain the key. +// +// The caller should not modify the contents of the returned slice, but +// it is safe to modify the contents of the argument after Get returns. 
+func (p *DB) Get(key []byte) (value []byte, err error) { + p.mu.RLock() + if node, exact := p.findGE(key, false); exact { + o := p.nodeData[node] + p.nodeData[node+nKey] + value = p.kvData[o : o+p.nodeData[node+nVal]] + } else { + err = ErrNotFound + } + p.mu.RUnlock() + return +} + +// Find finds key/value pair whose key is greater than or equal to the +// given key. It returns ErrNotFound if the table doesn't contain +// such pair. +// +// The caller should not modify the contents of the returned slice, but +// it is safe to modify the contents of the argument after Find returns. +func (p *DB) Find(key []byte) (rkey, value []byte, err error) { + p.mu.RLock() + if node, _ := p.findGE(key, false); node != 0 { + n := p.nodeData[node] + m := n + p.nodeData[node+nKey] + rkey = p.kvData[n:m] + value = p.kvData[m : m+p.nodeData[node+nVal]] + } else { + err = ErrNotFound + } + p.mu.RUnlock() + return +} + +// NewIterator returns an iterator of the DB. +// The returned iterator is not safe for concurrent use, but it is safe to use +// multiple iterators concurrently, with each in a dedicated goroutine. +// It is also safe to use an iterator concurrently with modifying its +// underlying DB. However, the resultant key/value pairs are not guaranteed +// to be a consistent snapshot of the DB at a particular point in time. +// +// Slice allows slicing the iterator to only contains keys in the given +// range. A nil Range.Start is treated as a key before all keys in the +// DB. And a nil Range.Limit is treated as a key after all keys in +// the DB. +// +// The iterator must be released after use, by calling Release method. +// +// Also read Iterator documentation of the leveldb/iterator package. +func (p *DB) NewIterator(slice *util.Range) iterator.Iterator { + return &dbIter{p: p, slice: slice} +} + +// Capacity returns keys/values buffer capacity. +func (p *DB) Capacity() int { + p.mu.RLock() + defer p.mu.RUnlock() + return cap(p.kvData) +} + +// Size returns sum of keys and values length. Note that deleted +// key/value will not be accounted for, but it will still consume +// the buffer, since the buffer is append only. +func (p *DB) Size() int { + p.mu.RLock() + defer p.mu.RUnlock() + return p.kvSize +} + +// Free returns keys/values free buffer before need to grow. +func (p *DB) Free() int { + p.mu.RLock() + defer p.mu.RUnlock() + return cap(p.kvData) - len(p.kvData) +} + +// Len returns the number of entries in the DB. +func (p *DB) Len() int { + p.mu.RLock() + defer p.mu.RUnlock() + return p.n +} + +// Reset resets the DB to initial empty state. Allows reuse the buffer. +func (p *DB) Reset() { + p.mu.Lock() + p.rnd = rand.New(rand.NewSource(0xdeadbeef)) + p.maxHeight = 1 + p.n = 0 + p.kvSize = 0 + p.kvData = p.kvData[:0] + p.nodeData = p.nodeData[:nNext+tMaxHeight] + p.nodeData[nKV] = 0 + p.nodeData[nKey] = 0 + p.nodeData[nVal] = 0 + p.nodeData[nHeight] = tMaxHeight + for n := 0; n < tMaxHeight; n++ { + p.nodeData[nNext+n] = 0 + p.prevNode[n] = 0 + } + p.mu.Unlock() +} + +// New creates a new initialized in-memory key/value DB. The capacity +// is the initial key/value buffer capacity. The capacity is advisory, +// not enforced. +// +// This DB is append-only, deleting an entry would remove entry node but not +// reclaim KV buffer. +// +// The returned DB instance is safe for concurrent use. 
+func New(cmp comparer.BasicComparer, capacity int) *DB { + p := &DB{ + cmp: cmp, + rnd: rand.New(rand.NewSource(0xdeadbeef)), + maxHeight: 1, + kvData: make([]byte, 0, capacity), + nodeData: make([]int, 4+tMaxHeight), + } + p.nodeData[nHeight] = tMaxHeight + return p +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go new file mode 100644 index 00000000000..44e7d9adce4 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go @@ -0,0 +1,684 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package opt provides sets of options used by LevelDB. +package opt + +import ( + "math" + + "github.com/syndtr/goleveldb/leveldb/cache" + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/filter" +) + +const ( + KiB = 1024 + MiB = KiB * 1024 + GiB = MiB * 1024 +) + +var ( + DefaultBlockCacher = LRUCacher + DefaultBlockCacheCapacity = 8 * MiB + DefaultBlockRestartInterval = 16 + DefaultBlockSize = 4 * KiB + DefaultCompactionExpandLimitFactor = 25 + DefaultCompactionGPOverlapsFactor = 10 + DefaultCompactionL0Trigger = 4 + DefaultCompactionSourceLimitFactor = 1 + DefaultCompactionTableSize = 2 * MiB + DefaultCompactionTableSizeMultiplier = 1.0 + DefaultCompactionTotalSize = 10 * MiB + DefaultCompactionTotalSizeMultiplier = 10.0 + DefaultCompressionType = SnappyCompression + DefaultIteratorSamplingRate = 1 * MiB + DefaultOpenFilesCacher = LRUCacher + DefaultOpenFilesCacheCapacity = 500 + DefaultWriteBuffer = 4 * MiB + DefaultWriteL0PauseTrigger = 12 + DefaultWriteL0SlowdownTrigger = 8 +) + +// Cacher is a caching algorithm. +type Cacher interface { + New(capacity int) cache.Cacher +} + +type CacherFunc struct { + NewFunc func(capacity int) cache.Cacher +} + +func (f *CacherFunc) New(capacity int) cache.Cacher { + if f.NewFunc != nil { + return f.NewFunc(capacity) + } + return nil +} + +func noCacher(int) cache.Cacher { return nil } + +var ( + // LRUCacher is the LRU-cache algorithm. + LRUCacher = &CacherFunc{cache.NewLRU} + + // NoCacher is the value to disable caching algorithm. + NoCacher = &CacherFunc{} +) + +// Compression is the 'sorted table' block compression algorithm to use. +type Compression uint + +func (c Compression) String() string { + switch c { + case DefaultCompression: + return "default" + case NoCompression: + return "none" + case SnappyCompression: + return "snappy" + } + return "invalid" +} + +const ( + DefaultCompression Compression = iota + NoCompression + SnappyCompression + nCompression +) + +// Strict is the DB 'strict level'. +type Strict uint + +const ( + // If present then a corrupted or invalid chunk or block in manifest + // journal will cause an error instead of being dropped. + // This will prevent database with corrupted manifest to be opened. + StrictManifest Strict = 1 << iota + + // If present then journal chunk checksum will be verified. + StrictJournalChecksum + + // If present then a corrupted or invalid chunk or block in journal + // will cause an error instead of being dropped. + // This will prevent database with corrupted journal to be opened. + StrictJournal + + // If present then 'sorted table' block checksum will be verified. + // This has effect on both 'read operation' and compaction. + StrictBlockChecksum + + // If present then a corrupted 'sorted table' will fails compaction. 
+ // The database will enter read-only mode. + StrictCompaction + + // If present then a corrupted 'sorted table' will halts 'read operation'. + StrictReader + + // If present then leveldb.Recover will drop corrupted 'sorted table'. + StrictRecovery + + // This only applicable for ReadOptions, if present then this ReadOptions + // 'strict level' will override global ones. + StrictOverride + + // StrictAll enables all strict flags. + StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery + + // DefaultStrict is the default strict flags. Specify any strict flags + // will override default strict flags as whole (i.e. not OR'ed). + DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader + + // NoStrict disables all strict flags. Override default strict flags. + NoStrict = ^StrictAll +) + +// Options holds the optional parameters for the DB at large. +type Options struct { + // AltFilters defines one or more 'alternative filters'. + // 'alternative filters' will be used during reads if a filter block + // does not match with the 'effective filter'. + // + // The default value is nil + AltFilters []filter.Filter + + // BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching. + // Specify NoCacher to disable caching algorithm. + // + // The default value is LRUCacher. + BlockCacher Cacher + + // BlockCacheCapacity defines the capacity of the 'sorted table' block caching. + // Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher. + // + // The default value is 8MiB. + BlockCacheCapacity int + + // BlockRestartInterval is the number of keys between restart points for + // delta encoding of keys. + // + // The default value is 16. + BlockRestartInterval int + + // BlockSize is the minimum uncompressed size in bytes of each 'sorted table' + // block. + // + // The default value is 4KiB. + BlockSize int + + // CompactionExpandLimitFactor limits compaction size after expanded. + // This will be multiplied by table size limit at compaction target level. + // + // The default value is 25. + CompactionExpandLimitFactor int + + // CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a + // single 'sorted table' generates. + // This will be multiplied by table size limit at grandparent level. + // + // The default value is 10. + CompactionGPOverlapsFactor int + + // CompactionL0Trigger defines number of 'sorted table' at level-0 that will + // trigger compaction. + // + // The default value is 4. + CompactionL0Trigger int + + // CompactionSourceLimitFactor limits compaction source size. This doesn't apply to + // level-0. + // This will be multiplied by table size limit at compaction target level. + // + // The default value is 1. + CompactionSourceLimitFactor int + + // CompactionTableSize limits size of 'sorted table' that compaction generates. + // The limits for each level will be calculated as: + // CompactionTableSize * (CompactionTableSizeMultiplier ^ Level) + // The multiplier for each level can also fine-tuned using CompactionTableSizeMultiplierPerLevel. + // + // The default value is 2MiB. + CompactionTableSize int + + // CompactionTableSizeMultiplier defines multiplier for CompactionTableSize. + // + // The default value is 1. + CompactionTableSizeMultiplier float64 + + // CompactionTableSizeMultiplierPerLevel defines per-level multiplier for + // CompactionTableSize. + // Use zero to skip a level. 
+ // + // The default value is nil. + CompactionTableSizeMultiplierPerLevel []float64 + + // CompactionTotalSize limits total size of 'sorted table' for each level. + // The limits for each level will be calculated as: + // CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level) + // The multiplier for each level can also fine-tuned using + // CompactionTotalSizeMultiplierPerLevel. + // + // The default value is 10MiB. + CompactionTotalSize int + + // CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize. + // + // The default value is 10. + CompactionTotalSizeMultiplier float64 + + // CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for + // CompactionTotalSize. + // Use zero to skip a level. + // + // The default value is nil. + CompactionTotalSizeMultiplierPerLevel []float64 + + // Comparer defines a total ordering over the space of []byte keys: a 'less + // than' relationship. The same comparison algorithm must be used for reads + // and writes over the lifetime of the DB. + // + // The default value uses the same ordering as bytes.Compare. + Comparer comparer.Comparer + + // Compression defines the 'sorted table' block compression to use. + // + // The default value (DefaultCompression) uses snappy compression. + Compression Compression + + // DisableBufferPool allows disable use of util.BufferPool functionality. + // + // The default value is false. + DisableBufferPool bool + + // DisableBlockCache allows disable use of cache.Cache functionality on + // 'sorted table' block. + // + // The default value is false. + DisableBlockCache bool + + // DisableCompactionBackoff allows disable compaction retry backoff. + // + // The default value is false. + DisableCompactionBackoff bool + + // DisableLargeBatchTransaction allows disabling switch-to-transaction mode + // on large batch write. If enable batch writes large than WriteBuffer will + // use transaction. + // + // The default is false. + DisableLargeBatchTransaction bool + + // ErrorIfExist defines whether an error should returned if the DB already + // exist. + // + // The default value is false. + ErrorIfExist bool + + // ErrorIfMissing defines whether an error should returned if the DB is + // missing. If false then the database will be created if missing, otherwise + // an error will be returned. + // + // The default value is false. + ErrorIfMissing bool + + // Filter defines an 'effective filter' to use. An 'effective filter' + // if defined will be used to generate per-table filter block. + // The filter name will be stored on disk. + // During reads LevelDB will try to find matching filter from + // 'effective filter' and 'alternative filters'. + // + // Filter can be changed after a DB has been created. It is recommended + // to put old filter to the 'alternative filters' to mitigate lack of + // filter during transition period. + // + // A filter is used to reduce disk reads when looking for a specific key. + // + // The default value is nil. + Filter filter.Filter + + // IteratorSamplingRate defines approximate gap (in bytes) between read + // sampling of an iterator. The samples will be used to determine when + // compaction should be triggered. + // + // The default is 1MiB. + IteratorSamplingRate int + + // NoSync allows completely disable fsync. + // + // The default is false. + NoSync bool + + // NoWriteMerge allows disabling write merge. + // + // The default is false. + NoWriteMerge bool + + // OpenFilesCacher provides cache algorithm for open files caching. 
+ // Specify NoCacher to disable caching algorithm. + // + // The default value is LRUCacher. + OpenFilesCacher Cacher + + // OpenFilesCacheCapacity defines the capacity of the open files caching. + // Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher. + // + // The default value is 500. + OpenFilesCacheCapacity int + + // If true then opens DB in read-only mode. + // + // The default value is false. + ReadOnly bool + + // Strict defines the DB strict level. + Strict Strict + + // WriteBuffer defines maximum size of a 'memdb' before flushed to + // 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk + // unsorted journal. + // + // LevelDB may held up to two 'memdb' at the same time. + // + // The default value is 4MiB. + WriteBuffer int + + // WriteL0StopTrigger defines number of 'sorted table' at level-0 that will + // pause write. + // + // The default value is 12. + WriteL0PauseTrigger int + + // WriteL0SlowdownTrigger defines number of 'sorted table' at level-0 that + // will trigger write slowdown. + // + // The default value is 8. + WriteL0SlowdownTrigger int +} + +func (o *Options) GetAltFilters() []filter.Filter { + if o == nil { + return nil + } + return o.AltFilters +} + +func (o *Options) GetBlockCacher() Cacher { + if o == nil || o.BlockCacher == nil { + return DefaultBlockCacher + } else if o.BlockCacher == NoCacher { + return nil + } + return o.BlockCacher +} + +func (o *Options) GetBlockCacheCapacity() int { + if o == nil || o.BlockCacheCapacity == 0 { + return DefaultBlockCacheCapacity + } else if o.BlockCacheCapacity < 0 { + return 0 + } + return o.BlockCacheCapacity +} + +func (o *Options) GetBlockRestartInterval() int { + if o == nil || o.BlockRestartInterval <= 0 { + return DefaultBlockRestartInterval + } + return o.BlockRestartInterval +} + +func (o *Options) GetBlockSize() int { + if o == nil || o.BlockSize <= 0 { + return DefaultBlockSize + } + return o.BlockSize +} + +func (o *Options) GetCompactionExpandLimit(level int) int { + factor := DefaultCompactionExpandLimitFactor + if o != nil && o.CompactionExpandLimitFactor > 0 { + factor = o.CompactionExpandLimitFactor + } + return o.GetCompactionTableSize(level+1) * factor +} + +func (o *Options) GetCompactionGPOverlaps(level int) int { + factor := DefaultCompactionGPOverlapsFactor + if o != nil && o.CompactionGPOverlapsFactor > 0 { + factor = o.CompactionGPOverlapsFactor + } + return o.GetCompactionTableSize(level+2) * factor +} + +func (o *Options) GetCompactionL0Trigger() int { + if o == nil || o.CompactionL0Trigger == 0 { + return DefaultCompactionL0Trigger + } + return o.CompactionL0Trigger +} + +func (o *Options) GetCompactionSourceLimit(level int) int { + factor := DefaultCompactionSourceLimitFactor + if o != nil && o.CompactionSourceLimitFactor > 0 { + factor = o.CompactionSourceLimitFactor + } + return o.GetCompactionTableSize(level+1) * factor +} + +func (o *Options) GetCompactionTableSize(level int) int { + var ( + base = DefaultCompactionTableSize + mult float64 + ) + if o != nil { + if o.CompactionTableSize > 0 { + base = o.CompactionTableSize + } + if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level] > 0 { + mult = o.CompactionTableSizeMultiplierPerLevel[level] + } else if o.CompactionTableSizeMultiplier > 0 { + mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level)) + } + } + if mult == 0 { + mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level)) + } + return int(float64(base) * mult) 
+} + +func (o *Options) GetCompactionTotalSize(level int) int64 { + var ( + base = DefaultCompactionTotalSize + mult float64 + ) + if o != nil { + if o.CompactionTotalSize > 0 { + base = o.CompactionTotalSize + } + if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 { + mult = o.CompactionTotalSizeMultiplierPerLevel[level] + } else if o.CompactionTotalSizeMultiplier > 0 { + mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level)) + } + } + if mult == 0 { + mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level)) + } + return int64(float64(base) * mult) +} + +func (o *Options) GetComparer() comparer.Comparer { + if o == nil || o.Comparer == nil { + return comparer.DefaultComparer + } + return o.Comparer +} + +func (o *Options) GetCompression() Compression { + if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression { + return DefaultCompressionType + } + return o.Compression +} + +func (o *Options) GetDisableBufferPool() bool { + if o == nil { + return false + } + return o.DisableBufferPool +} + +func (o *Options) GetDisableBlockCache() bool { + if o == nil { + return false + } + return o.DisableBlockCache +} + +func (o *Options) GetDisableCompactionBackoff() bool { + if o == nil { + return false + } + return o.DisableCompactionBackoff +} + +func (o *Options) GetDisableLargeBatchTransaction() bool { + if o == nil { + return false + } + return o.DisableLargeBatchTransaction +} + +func (o *Options) GetErrorIfExist() bool { + if o == nil { + return false + } + return o.ErrorIfExist +} + +func (o *Options) GetErrorIfMissing() bool { + if o == nil { + return false + } + return o.ErrorIfMissing +} + +func (o *Options) GetFilter() filter.Filter { + if o == nil { + return nil + } + return o.Filter +} + +func (o *Options) GetIteratorSamplingRate() int { + if o == nil || o.IteratorSamplingRate <= 0 { + return DefaultIteratorSamplingRate + } + return o.IteratorSamplingRate +} + +func (o *Options) GetNoSync() bool { + if o == nil { + return false + } + return o.NoSync +} + +func (o *Options) GetNoWriteMerge() bool { + if o == nil { + return false + } + return o.NoWriteMerge +} + +func (o *Options) GetOpenFilesCacher() Cacher { + if o == nil || o.OpenFilesCacher == nil { + return DefaultOpenFilesCacher + } + if o.OpenFilesCacher == NoCacher { + return nil + } + return o.OpenFilesCacher +} + +func (o *Options) GetOpenFilesCacheCapacity() int { + if o == nil || o.OpenFilesCacheCapacity == 0 { + return DefaultOpenFilesCacheCapacity + } else if o.OpenFilesCacheCapacity < 0 { + return 0 + } + return o.OpenFilesCacheCapacity +} + +func (o *Options) GetReadOnly() bool { + if o == nil { + return false + } + return o.ReadOnly +} + +func (o *Options) GetStrict(strict Strict) bool { + if o == nil || o.Strict == 0 { + return DefaultStrict&strict != 0 + } + return o.Strict&strict != 0 +} + +func (o *Options) GetWriteBuffer() int { + if o == nil || o.WriteBuffer <= 0 { + return DefaultWriteBuffer + } + return o.WriteBuffer +} + +func (o *Options) GetWriteL0PauseTrigger() int { + if o == nil || o.WriteL0PauseTrigger == 0 { + return DefaultWriteL0PauseTrigger + } + return o.WriteL0PauseTrigger +} + +func (o *Options) GetWriteL0SlowdownTrigger() int { + if o == nil || o.WriteL0SlowdownTrigger == 0 { + return DefaultWriteL0SlowdownTrigger + } + return o.WriteL0SlowdownTrigger +} + +// ReadOptions holds the optional parameters for 'read operation'. The +// 'read operation' includes Get, Find and NewIterator. 
+type ReadOptions struct { + // DontFillCache defines whether block reads for this 'read operation' + // should be cached. If false then the block will be cached. This does + // not affects already cached block. + // + // The default value is false. + DontFillCache bool + + // Strict will be OR'ed with global DB 'strict level' unless StrictOverride + // is present. Currently only StrictReader that has effect here. + Strict Strict +} + +func (ro *ReadOptions) GetDontFillCache() bool { + if ro == nil { + return false + } + return ro.DontFillCache +} + +func (ro *ReadOptions) GetStrict(strict Strict) bool { + if ro == nil { + return false + } + return ro.Strict&strict != 0 +} + +// WriteOptions holds the optional parameters for 'write operation'. The +// 'write operation' includes Write, Put and Delete. +type WriteOptions struct { + // NoWriteMerge allows disabling write merge. + // + // The default is false. + NoWriteMerge bool + + // Sync is whether to sync underlying writes from the OS buffer cache + // through to actual disk, if applicable. Setting Sync can result in + // slower writes. + // + // If false, and the machine crashes, then some recent writes may be lost. + // Note that if it is just the process that crashes (and the machine does + // not) then no writes will be lost. + // + // In other words, Sync being false has the same semantics as a write + // system call. Sync being true means write followed by fsync. + // + // The default value is false. + Sync bool +} + +func (wo *WriteOptions) GetNoWriteMerge() bool { + if wo == nil { + return false + } + return wo.NoWriteMerge +} + +func (wo *WriteOptions) GetSync() bool { + if wo == nil { + return false + } + return wo.Sync +} + +func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool { + if ro.GetStrict(StrictOverride) { + return ro.GetStrict(strict) + } else { + return o.GetStrict(strict) || ro.GetStrict(strict) + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/options.go new file mode 100644 index 00000000000..b072b1ac4c7 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/options.go @@ -0,0 +1,107 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/filter" + "github.com/syndtr/goleveldb/leveldb/opt" +) + +func dupOptions(o *opt.Options) *opt.Options { + newo := &opt.Options{} + if o != nil { + *newo = *o + } + if newo.Strict == 0 { + newo.Strict = opt.DefaultStrict + } + return newo +} + +func (s *session) setOptions(o *opt.Options) { + no := dupOptions(o) + // Alternative filters. + if filters := o.GetAltFilters(); len(filters) > 0 { + no.AltFilters = make([]filter.Filter, len(filters)) + for i, filter := range filters { + no.AltFilters[i] = &iFilter{filter} + } + } + // Comparer. + s.icmp = &iComparer{o.GetComparer()} + no.Comparer = s.icmp + // Filter. 
+ if filter := o.GetFilter(); filter != nil { + no.Filter = &iFilter{filter} + } + + s.o = &cachedOptions{Options: no} + s.o.cache() +} + +const optCachedLevel = 7 + +type cachedOptions struct { + *opt.Options + + compactionExpandLimit []int + compactionGPOverlaps []int + compactionSourceLimit []int + compactionTableSize []int + compactionTotalSize []int64 +} + +func (co *cachedOptions) cache() { + co.compactionExpandLimit = make([]int, optCachedLevel) + co.compactionGPOverlaps = make([]int, optCachedLevel) + co.compactionSourceLimit = make([]int, optCachedLevel) + co.compactionTableSize = make([]int, optCachedLevel) + co.compactionTotalSize = make([]int64, optCachedLevel) + + for level := 0; level < optCachedLevel; level++ { + co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level) + co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level) + co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level) + co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level) + co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level) + } +} + +func (co *cachedOptions) GetCompactionExpandLimit(level int) int { + if level < optCachedLevel { + return co.compactionExpandLimit[level] + } + return co.Options.GetCompactionExpandLimit(level) +} + +func (co *cachedOptions) GetCompactionGPOverlaps(level int) int { + if level < optCachedLevel { + return co.compactionGPOverlaps[level] + } + return co.Options.GetCompactionGPOverlaps(level) +} + +func (co *cachedOptions) GetCompactionSourceLimit(level int) int { + if level < optCachedLevel { + return co.compactionSourceLimit[level] + } + return co.Options.GetCompactionSourceLimit(level) +} + +func (co *cachedOptions) GetCompactionTableSize(level int) int { + if level < optCachedLevel { + return co.compactionTableSize[level] + } + return co.Options.GetCompactionTableSize(level) +} + +func (co *cachedOptions) GetCompactionTotalSize(level int) int64 { + if level < optCachedLevel { + return co.compactionTotalSize[level] + } + return co.Options.GetCompactionTotalSize(level) +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session.go b/vendor/github.com/syndtr/goleveldb/leveldb/session.go new file mode 100644 index 00000000000..3f391f93462 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/session.go @@ -0,0 +1,210 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "io" + "os" + "sync" + + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/journal" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +// ErrManifestCorrupted records manifest corruption. This error will be +// wrapped with errors.ErrCorrupted. +type ErrManifestCorrupted struct { + Field string + Reason string +} + +func (e *ErrManifestCorrupted) Error() string { + return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason) +} + +func newErrManifestCorrupted(fd storage.FileDesc, field, reason string) error { + return errors.NewErrCorrupted(fd, &ErrManifestCorrupted{field, reason}) +} + +// session represent a persistent database session. +type session struct { + // Need 64-bit alignment. 
+ stNextFileNum int64 // current unused file number + stJournalNum int64 // current journal file number; need external synchronization + stPrevJournalNum int64 // prev journal file number; no longer used; for compatibility with older version of leveldb + stTempFileNum int64 + stSeqNum uint64 // last mem compacted seq; need external synchronization + + stor *iStorage + storLock storage.Locker + o *cachedOptions + icmp *iComparer + tops *tOps + fileRef map[int64]int + + manifest *journal.Writer + manifestWriter storage.Writer + manifestFd storage.FileDesc + + stCompPtrs []internalKey // compaction pointers; need external synchronization + stVersion *version // current version + vmu sync.Mutex +} + +// Creates new initialized session instance. +func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { + if stor == nil { + return nil, os.ErrInvalid + } + storLock, err := stor.Lock() + if err != nil { + return + } + s = &session{ + stor: newIStorage(stor), + storLock: storLock, + fileRef: make(map[int64]int), + } + s.setOptions(o) + s.tops = newTableOps(s) + s.setVersion(newVersion(s)) + s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed") + return +} + +// Close session. +func (s *session) close() { + s.tops.close() + if s.manifest != nil { + s.manifest.Close() + } + if s.manifestWriter != nil { + s.manifestWriter.Close() + } + s.manifest = nil + s.manifestWriter = nil + s.setVersion(&version{s: s, closing: true}) +} + +// Release session lock. +func (s *session) release() { + s.storLock.Unlock() +} + +// Create a new database session; need external synchronization. +func (s *session) create() error { + // create manifest + return s.newManifest(nil, nil) +} + +// Recover a database session; need external synchronization. +func (s *session) recover() (err error) { + defer func() { + if os.IsNotExist(err) { + // Don't return os.ErrNotExist if the underlying storage contains + // other files that belong to LevelDB. So the DB won't get trashed. + if fds, _ := s.stor.List(storage.TypeAll); len(fds) > 0 { + err = &errors.ErrCorrupted{Fd: storage.FileDesc{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}} + } + } + }() + + fd, err := s.stor.GetMeta() + if err != nil { + return + } + + reader, err := s.stor.Open(fd) + if err != nil { + return + } + defer reader.Close() + + var ( + // Options. 
+ strict = s.o.GetStrict(opt.StrictManifest) + + jr = journal.NewReader(reader, dropper{s, fd}, strict, true) + rec = &sessionRecord{} + staging = s.stVersion.newStaging() + ) + for { + var r io.Reader + r, err = jr.Next() + if err != nil { + if err == io.EOF { + err = nil + break + } + return errors.SetFd(err, fd) + } + + err = rec.decode(r) + if err == nil { + // save compact pointers + for _, r := range rec.compPtrs { + s.setCompPtr(r.level, internalKey(r.ikey)) + } + // commit record to version staging + staging.commit(rec) + } else { + err = errors.SetFd(err, fd) + if strict || !errors.IsCorrupted(err) { + return + } + s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd)) + } + rec.resetCompPtrs() + rec.resetAddedTables() + rec.resetDeletedTables() + } + + switch { + case !rec.has(recComparer): + return newErrManifestCorrupted(fd, "comparer", "missing") + case rec.comparer != s.icmp.uName(): + return newErrManifestCorrupted(fd, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer)) + case !rec.has(recNextFileNum): + return newErrManifestCorrupted(fd, "next-file-num", "missing") + case !rec.has(recJournalNum): + return newErrManifestCorrupted(fd, "journal-file-num", "missing") + case !rec.has(recSeqNum): + return newErrManifestCorrupted(fd, "seq-num", "missing") + } + + s.manifestFd = fd + s.setVersion(staging.finish()) + s.setNextFileNum(rec.nextFileNum) + s.recordCommited(rec) + return nil +} + +// Commit session; need external synchronization. +func (s *session) commit(r *sessionRecord) (err error) { + v := s.version() + defer v.release() + + // spawn new version based on current version + nv := v.spawn(r) + + if s.manifest == nil { + // manifest journal writer not yet created, create one + err = s.newManifest(r, nv) + } else { + err = s.flushManifest(r) + } + + // finally, apply new version if no error rise + if err == nil { + s.setVersion(nv) + } + + return +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go new file mode 100644 index 00000000000..089cd00b26d --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go @@ -0,0 +1,302 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "sync/atomic" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/opt" +) + +func (s *session) pickMemdbLevel(umin, umax []byte, maxLevel int) int { + v := s.version() + defer v.release() + return v.pickMemdbLevel(umin, umax, maxLevel) +} + +func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, maxLevel int) (int, error) { + // Create sorted table. + iter := mdb.NewIterator(nil) + defer iter.Release() + t, n, err := s.tops.createFrom(iter) + if err != nil { + return 0, err + } + + // Pick level other than zero can cause compaction issue with large + // bulk insert and delete on strictly incrementing key-space. The + // problem is that the small deletion markers trapped at lower level, + // while key/value entries keep growing at higher level. Since the + // key-space is strictly incrementing it will not overlaps with + // higher level, thus maximum possible level is always picked, while + // overlapping deletion marker pushed into lower level. 
+ // See: https://github.com/syndtr/goleveldb/issues/127. + flushLevel := s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey(), maxLevel) + rec.addTableFile(flushLevel, t) + + s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", flushLevel, t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax) + return flushLevel, nil +} + +// Pick a compaction based on current state; need external synchronization. +func (s *session) pickCompaction() *compaction { + v := s.version() + + var sourceLevel int + var t0 tFiles + if v.cScore >= 1 { + sourceLevel = v.cLevel + cptr := s.getCompPtr(sourceLevel) + tables := v.levels[sourceLevel] + for _, t := range tables { + if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 { + t0 = append(t0, t) + break + } + } + if len(t0) == 0 { + t0 = append(t0, tables[0]) + } + } else { + if p := atomic.LoadPointer(&v.cSeek); p != nil { + ts := (*tSet)(p) + sourceLevel = ts.level + t0 = append(t0, ts.table) + } else { + v.release() + return nil + } + } + + return newCompaction(s, v, sourceLevel, t0) +} + +// Create compaction from given level and range; need external synchronization. +func (s *session) getCompactionRange(sourceLevel int, umin, umax []byte, noLimit bool) *compaction { + v := s.version() + + if sourceLevel >= len(v.levels) { + v.release() + return nil + } + + t0 := v.levels[sourceLevel].getOverlaps(nil, s.icmp, umin, umax, sourceLevel == 0) + if len(t0) == 0 { + v.release() + return nil + } + + // Avoid compacting too much in one shot in case the range is large. + // But we cannot do this for level-0 since level-0 files can overlap + // and we must not pick one file and drop another older file if the + // two files overlap. + if !noLimit && sourceLevel > 0 { + limit := int64(v.s.o.GetCompactionSourceLimit(sourceLevel)) + total := int64(0) + for i, t := range t0 { + total += t.size + if total >= limit { + s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1) + t0 = t0[:i+1] + break + } + } + } + + return newCompaction(s, v, sourceLevel, t0) +} + +func newCompaction(s *session, v *version, sourceLevel int, t0 tFiles) *compaction { + c := &compaction{ + s: s, + v: v, + sourceLevel: sourceLevel, + levels: [2]tFiles{t0, nil}, + maxGPOverlaps: int64(s.o.GetCompactionGPOverlaps(sourceLevel)), + tPtrs: make([]int, len(v.levels)), + } + c.expand() + c.save() + return c +} + +// compaction represent a compaction state. +type compaction struct { + s *session + v *version + + sourceLevel int + levels [2]tFiles + maxGPOverlaps int64 + + gp tFiles + gpi int + seenKey bool + gpOverlappedBytes int64 + imin, imax internalKey + tPtrs []int + released bool + + snapGPI int + snapSeenKey bool + snapGPOverlappedBytes int64 + snapTPtrs []int +} + +func (c *compaction) save() { + c.snapGPI = c.gpi + c.snapSeenKey = c.seenKey + c.snapGPOverlappedBytes = c.gpOverlappedBytes + c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...) +} + +func (c *compaction) restore() { + c.gpi = c.snapGPI + c.seenKey = c.snapSeenKey + c.gpOverlappedBytes = c.snapGPOverlappedBytes + c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...) +} + +func (c *compaction) release() { + if !c.released { + c.released = true + c.v.release() + } +} + +// Expand compacted tables; need external synchronization. 
+func (c *compaction) expand() { + limit := int64(c.s.o.GetCompactionExpandLimit(c.sourceLevel)) + vt0 := c.v.levels[c.sourceLevel] + vt1 := tFiles{} + if level := c.sourceLevel + 1; level < len(c.v.levels) { + vt1 = c.v.levels[level] + } + + t0, t1 := c.levels[0], c.levels[1] + imin, imax := t0.getRange(c.s.icmp) + // We expand t0 here just incase ukey hop across tables. + t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.sourceLevel == 0) + if len(t0) != len(c.levels[0]) { + imin, imax = t0.getRange(c.s.icmp) + } + t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false) + // Get entire range covered by compaction. + amin, amax := append(t0, t1...).getRange(c.s.icmp) + + // See if we can grow the number of inputs in "sourceLevel" without + // changing the number of "sourceLevel+1" files we pick up. + if len(t1) > 0 { + exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.sourceLevel == 0) + if len(exp0) > len(t0) && t1.size()+exp0.size() < limit { + xmin, xmax := exp0.getRange(c.s.icmp) + exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) + if len(exp1) == len(t1) { + c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", + c.sourceLevel, c.sourceLevel+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), + len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) + imin, imax = xmin, xmax + t0, t1 = exp0, exp1 + amin, amax = append(t0, t1...).getRange(c.s.icmp) + } + } + } + + // Compute the set of grandparent files that overlap this compaction + // (parent == sourceLevel+1; grandparent == sourceLevel+2) + if level := c.sourceLevel + 2; level < len(c.v.levels) { + c.gp = c.v.levels[level].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) + } + + c.levels[0], c.levels[1] = t0, t1 + c.imin, c.imax = imin, imax +} + +// Check whether compaction is trivial. +func (c *compaction) trivial() bool { + return len(c.levels[0]) == 1 && len(c.levels[1]) == 0 && c.gp.size() <= c.maxGPOverlaps +} + +func (c *compaction) baseLevelForKey(ukey []byte) bool { + for level := c.sourceLevel + 2; level < len(c.v.levels); level++ { + tables := c.v.levels[level] + for c.tPtrs[level] < len(tables) { + t := tables[c.tPtrs[level]] + if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 { + // We've advanced far enough. + if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { + // Key falls in this file's range, so definitely not base level. + return false + } + break + } + c.tPtrs[level]++ + } + } + return true +} + +func (c *compaction) shouldStopBefore(ikey internalKey) bool { + for ; c.gpi < len(c.gp); c.gpi++ { + gp := c.gp[c.gpi] + if c.s.icmp.Compare(ikey, gp.imax) <= 0 { + break + } + if c.seenKey { + c.gpOverlappedBytes += gp.size + } + } + c.seenKey = true + + if c.gpOverlappedBytes > c.maxGPOverlaps { + // Too much overlap for current output; start new output. + c.gpOverlappedBytes = 0 + return true + } + return false +} + +// Creates an iterator. +func (c *compaction) newIterator() iterator.Iterator { + // Creates iterator slice. + icap := len(c.levels) + if c.sourceLevel == 0 { + // Special case for level-0. + icap = len(c.levels[0]) + 1 + } + its := make([]iterator.Iterator, 0, icap) + + // Options. 
+ ro := &opt.ReadOptions{ + DontFillCache: true, + Strict: opt.StrictOverride, + } + strict := c.s.o.GetStrict(opt.StrictCompaction) + if strict { + ro.Strict |= opt.StrictReader + } + + for i, tables := range c.levels { + if len(tables) == 0 { + continue + } + + // Level-0 is not sorted and may overlaps each other. + if c.sourceLevel+i == 0 { + for _, t := range tables { + its = append(its, c.s.tops.newIterator(t, nil, ro)) + } + } else { + it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict) + its = append(its, it) + } + } + + return iterator.NewMergedIterator(its, c.s.icmp, strict) +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go new file mode 100644 index 00000000000..854e1aa6f9b --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go @@ -0,0 +1,323 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "bufio" + "encoding/binary" + "io" + "strings" + + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +type byteReader interface { + io.Reader + io.ByteReader +} + +// These numbers are written to disk and should not be changed. +const ( + recComparer = 1 + recJournalNum = 2 + recNextFileNum = 3 + recSeqNum = 4 + recCompPtr = 5 + recDelTable = 6 + recAddTable = 7 + // 8 was used for large value refs + recPrevJournalNum = 9 +) + +type cpRecord struct { + level int + ikey internalKey +} + +type atRecord struct { + level int + num int64 + size int64 + imin internalKey + imax internalKey +} + +type dtRecord struct { + level int + num int64 +} + +type sessionRecord struct { + hasRec int + comparer string + journalNum int64 + prevJournalNum int64 + nextFileNum int64 + seqNum uint64 + compPtrs []cpRecord + addedTables []atRecord + deletedTables []dtRecord + + scratch [binary.MaxVarintLen64]byte + err error +} + +func (p *sessionRecord) has(rec int) bool { + return p.hasRec&(1< +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "sync/atomic" + + "github.com/syndtr/goleveldb/leveldb/journal" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +// Logging. + +type dropper struct { + s *session + fd storage.FileDesc +} + +func (d dropper) Drop(err error) { + if e, ok := err.(*journal.ErrCorrupted); ok { + d.s.logf("journal@drop %s-%d S·%s %q", d.fd.Type, d.fd.Num, shortenb(e.Size), e.Reason) + } else { + d.s.logf("journal@drop %s-%d %q", d.fd.Type, d.fd.Num, err) + } +} + +func (s *session) log(v ...interface{}) { s.stor.Log(fmt.Sprint(v...)) } +func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) } + +// File utils. + +func (s *session) newTemp() storage.FileDesc { + num := atomic.AddInt64(&s.stTempFileNum, 1) - 1 + return storage.FileDesc{storage.TypeTemp, num} +} + +func (s *session) addFileRef(fd storage.FileDesc, ref int) int { + ref += s.fileRef[fd.Num] + if ref > 0 { + s.fileRef[fd.Num] = ref + } else if ref == 0 { + delete(s.fileRef, fd.Num) + } else { + panic(fmt.Sprintf("negative ref: %v", fd)) + } + return ref +} + +// Session state. + +// Get current version. This will incr version ref, must call +// version.release (exactly once) after use. 
+func (s *session) version() *version { + s.vmu.Lock() + defer s.vmu.Unlock() + s.stVersion.incref() + return s.stVersion +} + +func (s *session) tLen(level int) int { + s.vmu.Lock() + defer s.vmu.Unlock() + return s.stVersion.tLen(level) +} + +// Set current version to v. +func (s *session) setVersion(v *version) { + s.vmu.Lock() + defer s.vmu.Unlock() + // Hold by session. It is important to call this first before releasing + // current version, otherwise the still used files might get released. + v.incref() + if s.stVersion != nil { + // Release current version. + s.stVersion.releaseNB() + } + s.stVersion = v +} + +// Get current unused file number. +func (s *session) nextFileNum() int64 { + return atomic.LoadInt64(&s.stNextFileNum) +} + +// Set current unused file number to num. +func (s *session) setNextFileNum(num int64) { + atomic.StoreInt64(&s.stNextFileNum, num) +} + +// Mark file number as used. +func (s *session) markFileNum(num int64) { + nextFileNum := num + 1 + for { + old, x := s.stNextFileNum, nextFileNum + if old > x { + x = old + } + if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) { + break + } + } +} + +// Allocate a file number. +func (s *session) allocFileNum() int64 { + return atomic.AddInt64(&s.stNextFileNum, 1) - 1 +} + +// Reuse given file number. +func (s *session) reuseFileNum(num int64) { + for { + old, x := s.stNextFileNum, num + if old != x+1 { + x = old + } + if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) { + break + } + } +} + +// Set compaction ptr at given level; need external synchronization. +func (s *session) setCompPtr(level int, ik internalKey) { + if level >= len(s.stCompPtrs) { + newCompPtrs := make([]internalKey, level+1) + copy(newCompPtrs, s.stCompPtrs) + s.stCompPtrs = newCompPtrs + } + s.stCompPtrs[level] = append(internalKey{}, ik...) +} + +// Get compaction ptr at given level; need external synchronization. +func (s *session) getCompPtr(level int) internalKey { + if level >= len(s.stCompPtrs) { + return nil + } + return s.stCompPtrs[level] +} + +// Manifest related utils. + +// Fill given session record obj with current states; need external +// synchronization. +func (s *session) fillRecord(r *sessionRecord, snapshot bool) { + r.setNextFileNum(s.nextFileNum()) + + if snapshot { + if !r.has(recJournalNum) { + r.setJournalNum(s.stJournalNum) + } + + if !r.has(recSeqNum) { + r.setSeqNum(s.stSeqNum) + } + + for level, ik := range s.stCompPtrs { + if ik != nil { + r.addCompPtr(level, ik) + } + } + + r.setComparer(s.icmp.uName()) + } +} + +// Mark if record has been committed, this will update session state; +// need external synchronization. +func (s *session) recordCommited(rec *sessionRecord) { + if rec.has(recJournalNum) { + s.stJournalNum = rec.journalNum + } + + if rec.has(recPrevJournalNum) { + s.stPrevJournalNum = rec.prevJournalNum + } + + if rec.has(recSeqNum) { + s.stSeqNum = rec.seqNum + } + + for _, r := range rec.compPtrs { + s.setCompPtr(r.level, internalKey(r.ikey)) + } +} + +// Create a new manifest file; need external synchronization. 
+func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { + fd := storage.FileDesc{storage.TypeManifest, s.allocFileNum()} + writer, err := s.stor.Create(fd) + if err != nil { + return + } + jw := journal.NewWriter(writer) + + if v == nil { + v = s.version() + defer v.release() + } + if rec == nil { + rec = &sessionRecord{} + } + s.fillRecord(rec, true) + v.fillRecord(rec) + + defer func() { + if err == nil { + s.recordCommited(rec) + if s.manifest != nil { + s.manifest.Close() + } + if s.manifestWriter != nil { + s.manifestWriter.Close() + } + if !s.manifestFd.Zero() { + s.stor.Remove(s.manifestFd) + } + s.manifestFd = fd + s.manifestWriter = writer + s.manifest = jw + } else { + writer.Close() + s.stor.Remove(fd) + s.reuseFileNum(fd.Num) + } + }() + + w, err := jw.Next() + if err != nil { + return + } + err = rec.encode(w) + if err != nil { + return + } + err = jw.Flush() + if err != nil { + return + } + err = s.stor.SetMeta(fd) + return +} + +// Flush record to disk. +func (s *session) flushManifest(rec *sessionRecord) (err error) { + s.fillRecord(rec, false) + w, err := s.manifest.Next() + if err != nil { + return + } + err = rec.encode(w) + if err != nil { + return + } + err = s.manifest.Flush() + if err != nil { + return + } + if !s.o.GetNoSync() { + err = s.manifestWriter.Sync() + if err != nil { + return + } + } + s.recordCommited(rec) + return +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go new file mode 100644 index 00000000000..d45fb5dfeb1 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go @@ -0,0 +1,63 @@ +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/storage" + "sync/atomic" +) + +type iStorage struct { + storage.Storage + read uint64 + write uint64 +} + +func (c *iStorage) Open(fd storage.FileDesc) (storage.Reader, error) { + r, err := c.Storage.Open(fd) + return &iStorageReader{r, c}, err +} + +func (c *iStorage) Create(fd storage.FileDesc) (storage.Writer, error) { + w, err := c.Storage.Create(fd) + return &iStorageWriter{w, c}, err +} + +func (c *iStorage) reads() uint64 { + return atomic.LoadUint64(&c.read) +} + +func (c *iStorage) writes() uint64 { + return atomic.LoadUint64(&c.write) +} + +// newIStorage returns the given storage wrapped by iStorage. +func newIStorage(s storage.Storage) *iStorage { + return &iStorage{s, 0, 0} +} + +type iStorageReader struct { + storage.Reader + c *iStorage +} + +func (r *iStorageReader) Read(p []byte) (n int, err error) { + n, err = r.Reader.Read(p) + atomic.AddUint64(&r.c.read, uint64(n)) + return n, err +} + +func (r *iStorageReader) ReadAt(p []byte, off int64) (n int, err error) { + n, err = r.Reader.ReadAt(p, off) + atomic.AddUint64(&r.c.read, uint64(n)) + return n, err +} + +type iStorageWriter struct { + storage.Writer + c *iStorage +} + +func (w *iStorageWriter) Write(p []byte) (n int, err error) { + n, err = w.Writer.Write(p) + atomic.AddUint64(&w.c.write, uint64(n)) + return n, err +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go new file mode 100644 index 00000000000..9ba71fd6d10 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go @@ -0,0 +1,671 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reservefs. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package storage + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +var ( + errFileOpen = errors.New("leveldb/storage: file still open") + errReadOnly = errors.New("leveldb/storage: storage is read-only") +) + +type fileLock interface { + release() error +} + +type fileStorageLock struct { + fs *fileStorage +} + +func (lock *fileStorageLock) Unlock() { + if lock.fs != nil { + lock.fs.mu.Lock() + defer lock.fs.mu.Unlock() + if lock.fs.slock == lock { + lock.fs.slock = nil + } + } +} + +type int64Slice []int64 + +func (p int64Slice) Len() int { return len(p) } +func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func writeFileSynced(filename string, data []byte, perm os.FileMode) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Sync(); err == nil { + err = err1 + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +const logSizeThreshold = 1024 * 1024 // 1 MiB + +// fileStorage is a file-system backed storage. +type fileStorage struct { + path string + readOnly bool + + mu sync.Mutex + flock fileLock + slock *fileStorageLock + logw *os.File + logSize int64 + buf []byte + // Opened file counter; if open < 0 means closed. + open int + day int +} + +// OpenFile returns a new filesystem-backed storage implementation with the given +// path. This also acquire a file lock, so any subsequent attempt to open the +// same path will fail. +// +// The storage must be closed after use, by calling Close method. +func OpenFile(path string, readOnly bool) (Storage, error) { + if fi, err := os.Stat(path); err == nil { + if !fi.IsDir() { + return nil, fmt.Errorf("leveldb/storage: open %s: not a directory", path) + } + } else if os.IsNotExist(err) && !readOnly { + if err := os.MkdirAll(path, 0755); err != nil { + return nil, err + } + } else { + return nil, err + } + + flock, err := newFileLock(filepath.Join(path, "LOCK"), readOnly) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + flock.release() + } + }() + + var ( + logw *os.File + logSize int64 + ) + if !readOnly { + logw, err = os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return nil, err + } + logSize, err = logw.Seek(0, os.SEEK_END) + if err != nil { + logw.Close() + return nil, err + } + } + + fs := &fileStorage{ + path: path, + readOnly: readOnly, + flock: flock, + logw: logw, + logSize: logSize, + } + runtime.SetFinalizer(fs, (*fileStorage).Close) + return fs, nil +} + +func (fs *fileStorage) Lock() (Locker, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + if fs.readOnly { + return &fileStorageLock{}, nil + } + if fs.slock != nil { + return nil, ErrLocked + } + fs.slock = &fileStorageLock{fs: fs} + return fs.slock, nil +} + +func itoa(buf []byte, i int, wid int) []byte { + u := uint(i) + if u == 0 && wid <= 1 { + return append(buf, '0') + } + + // Assemble decimal in reverse order. + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + return append(buf, b[bp:]...) 
+} + +func (fs *fileStorage) printDay(t time.Time) { + if fs.day == t.Day() { + return + } + fs.day = t.Day() + fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) +} + +func (fs *fileStorage) doLog(t time.Time, str string) { + if fs.logSize > logSizeThreshold { + // Rotate log file. + fs.logw.Close() + fs.logw = nil + fs.logSize = 0 + rename(filepath.Join(fs.path, "LOG"), filepath.Join(fs.path, "LOG.old")) + } + if fs.logw == nil { + var err error + fs.logw, err = os.OpenFile(filepath.Join(fs.path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return + } + // Force printDay on new log file. + fs.day = 0 + } + fs.printDay(t) + hour, min, sec := t.Clock() + msec := t.Nanosecond() / 1e3 + // time + fs.buf = itoa(fs.buf[:0], hour, 2) + fs.buf = append(fs.buf, ':') + fs.buf = itoa(fs.buf, min, 2) + fs.buf = append(fs.buf, ':') + fs.buf = itoa(fs.buf, sec, 2) + fs.buf = append(fs.buf, '.') + fs.buf = itoa(fs.buf, msec, 6) + fs.buf = append(fs.buf, ' ') + // write + fs.buf = append(fs.buf, []byte(str)...) + fs.buf = append(fs.buf, '\n') + n, _ := fs.logw.Write(fs.buf) + fs.logSize += int64(n) +} + +func (fs *fileStorage) Log(str string) { + if !fs.readOnly { + t := time.Now() + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return + } + fs.doLog(t, str) + } +} + +func (fs *fileStorage) log(str string) { + if !fs.readOnly { + fs.doLog(time.Now(), str) + } +} + +func (fs *fileStorage) setMeta(fd FileDesc) error { + content := fsGenName(fd) + "\n" + // Check and backup old CURRENT file. + currentPath := filepath.Join(fs.path, "CURRENT") + if _, err := os.Stat(currentPath); err == nil { + b, err := ioutil.ReadFile(currentPath) + if err != nil { + fs.log(fmt.Sprintf("backup CURRENT: %v", err)) + return err + } + if string(b) == content { + // Content not changed, do nothing. + return nil + } + if err := writeFileSynced(currentPath+".bak", b, 0644); err != nil { + fs.log(fmt.Sprintf("backup CURRENT: %v", err)) + return err + } + } else if !os.IsNotExist(err) { + return err + } + path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) + if err := writeFileSynced(path, []byte(content), 0644); err != nil { + fs.log(fmt.Sprintf("create CURRENT.%d: %v", fd.Num, err)) + return err + } + // Replace CURRENT file. + if err := rename(path, currentPath); err != nil { + fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err)) + return err + } + // Sync root directory. + if err := syncDir(fs.path); err != nil { + fs.log(fmt.Sprintf("syncDir: %v", err)) + return err + } + return nil +} + +func (fs *fileStorage) SetMeta(fd FileDesc) error { + if !FileDescOk(fd) { + return ErrInvalidFile + } + if fs.readOnly { + return errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + return fs.setMeta(fd) +} + +func (fs *fileStorage) GetMeta() (FileDesc, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return FileDesc{}, ErrClosed + } + dir, err := os.Open(fs.path) + if err != nil { + return FileDesc{}, err + } + names, err := dir.Readdirnames(0) + // Close the dir first before checking for Readdirnames error. + if ce := dir.Close(); ce != nil { + fs.log(fmt.Sprintf("close dir: %v", ce)) + } + if err != nil { + return FileDesc{}, err + } + // Try this in order: + // - CURRENT.[0-9]+ ('pending rename' file, descending order) + // - CURRENT + // - CURRENT.bak + // + // Skip corrupted file or file that point to a missing target file. 
+ type currentFile struct { + name string + fd FileDesc + } + tryCurrent := func(name string) (*currentFile, error) { + b, err := ioutil.ReadFile(filepath.Join(fs.path, name)) + if err != nil { + if os.IsNotExist(err) { + err = os.ErrNotExist + } + return nil, err + } + var fd FileDesc + if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd) { + fs.log(fmt.Sprintf("%s: corrupted content: %q", name, b)) + err := &ErrCorrupted{ + Err: errors.New("leveldb/storage: corrupted or incomplete CURRENT file"), + } + return nil, err + } + if _, err := os.Stat(filepath.Join(fs.path, fsGenName(fd))); err != nil { + if os.IsNotExist(err) { + fs.log(fmt.Sprintf("%s: missing target file: %s", name, fd)) + err = os.ErrNotExist + } + return nil, err + } + return &currentFile{name: name, fd: fd}, nil + } + tryCurrents := func(names []string) (*currentFile, error) { + var ( + cur *currentFile + // Last corruption error. + lastCerr error + ) + for _, name := range names { + var err error + cur, err = tryCurrent(name) + if err == nil { + break + } else if err == os.ErrNotExist { + // Fallback to the next file. + } else if isCorrupted(err) { + lastCerr = err + // Fallback to the next file. + } else { + // In case the error is due to permission, etc. + return nil, err + } + } + if cur == nil { + err := os.ErrNotExist + if lastCerr != nil { + err = lastCerr + } + return nil, err + } + return cur, nil + } + + // Try 'pending rename' files. + var nums []int64 + for _, name := range names { + if strings.HasPrefix(name, "CURRENT.") && name != "CURRENT.bak" { + i, err := strconv.ParseInt(name[8:], 10, 64) + if err == nil { + nums = append(nums, i) + } + } + } + var ( + pendCur *currentFile + pendErr = os.ErrNotExist + pendNames []string + ) + if len(nums) > 0 { + sort.Sort(sort.Reverse(int64Slice(nums))) + pendNames = make([]string, len(nums)) + for i, num := range nums { + pendNames[i] = fmt.Sprintf("CURRENT.%d", num) + } + pendCur, pendErr = tryCurrents(pendNames) + if pendErr != nil && pendErr != os.ErrNotExist && !isCorrupted(pendErr) { + return FileDesc{}, pendErr + } + } + + // Try CURRENT and CURRENT.bak. + curCur, curErr := tryCurrents([]string{"CURRENT", "CURRENT.bak"}) + if curErr != nil && curErr != os.ErrNotExist && !isCorrupted(curErr) { + return FileDesc{}, curErr + } + + // pendCur takes precedence, but guards against obsolete pendCur. + if pendCur != nil && (curCur == nil || pendCur.fd.Num > curCur.fd.Num) { + curCur = pendCur + } + + if curCur != nil { + // Restore CURRENT file to proper state. + if !fs.readOnly && (curCur.name != "CURRENT" || len(pendNames) != 0) { + // Ignore setMeta errors, however don't delete obsolete files if we + // catch error. + if err := fs.setMeta(curCur.fd); err == nil { + // Remove 'pending rename' files. + for _, name := range pendNames { + if err := os.Remove(filepath.Join(fs.path, name)); err != nil { + fs.log(fmt.Sprintf("remove %s: %v", name, err)) + } + } + } + } + return curCur.fd, nil + } + + // Nothing found. + if isCorrupted(pendErr) { + return FileDesc{}, pendErr + } + return FileDesc{}, curErr +} + +func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + dir, err := os.Open(fs.path) + if err != nil { + return + } + names, err := dir.Readdirnames(0) + // Close the dir first before checking for Readdirnames error.
+ if cerr := dir.Close(); cerr != nil { + fs.log(fmt.Sprintf("close dir: %v", cerr)) + } + if err == nil { + for _, name := range names { + if fd, ok := fsParseName(name); ok && fd.Type&ft != 0 { + fds = append(fds, fd) + } + } + } + return +} + +func (fs *fileStorage) Open(fd FileDesc) (Reader, error) { + if !FileDescOk(fd) { + return nil, ErrInvalidFile + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_RDONLY, 0) + if err != nil { + if fsHasOldName(fd) && os.IsNotExist(err) { + of, err = os.OpenFile(filepath.Join(fs.path, fsGenOldName(fd)), os.O_RDONLY, 0) + if err == nil { + goto ok + } + } + return nil, err + } +ok: + fs.open++ + return &fileWrap{File: of, fs: fs, fd: fd}, nil +} + +func (fs *fileStorage) Create(fd FileDesc) (Writer, error) { + if !FileDescOk(fd) { + return nil, ErrInvalidFile + } + if fs.readOnly { + return nil, errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return nil, err + } + fs.open++ + return &fileWrap{File: of, fs: fs, fd: fd}, nil +} + +func (fs *fileStorage) Remove(fd FileDesc) error { + if !FileDescOk(fd) { + return ErrInvalidFile + } + if fs.readOnly { + return errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + err := os.Remove(filepath.Join(fs.path, fsGenName(fd))) + if err != nil { + if fsHasOldName(fd) && os.IsNotExist(err) { + if e1 := os.Remove(filepath.Join(fs.path, fsGenOldName(fd))); !os.IsNotExist(e1) { + fs.log(fmt.Sprintf("remove %s: %v (old name)", fd, err)) + err = e1 + } + } else { + fs.log(fmt.Sprintf("remove %s: %v", fd, err)) + } + } + return err +} + +func (fs *fileStorage) Rename(oldfd, newfd FileDesc) error { + if !FileDescOk(oldfd) || !FileDescOk(newfd) { + return ErrInvalidFile + } + if oldfd == newfd { + return nil + } + if fs.readOnly { + return errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + return rename(filepath.Join(fs.path, fsGenName(oldfd)), filepath.Join(fs.path, fsGenName(newfd))) +} + +func (fs *fileStorage) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + // Clear the finalizer. + runtime.SetFinalizer(fs, nil) + + if fs.open > 0 { + fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open)) + } + fs.open = -1 + if fs.logw != nil { + fs.logw.Close() + } + return fs.flock.release() +} + +type fileWrap struct { + *os.File + fs *fileStorage + fd FileDesc + closed bool +} + +func (fw *fileWrap) Sync() error { + if err := fw.File.Sync(); err != nil { + return err + } + if fw.fd.Type == TypeManifest { + // Also sync parent directory if file type is manifest. + // See: https://code.google.com/p/leveldb/issues/detail?id=190. 
+ if err := syncDir(fw.fs.path); err != nil { + fw.fs.log(fmt.Sprintf("syncDir: %v", err)) + return err + } + } + return nil +} + +func (fw *fileWrap) Close() error { + fw.fs.mu.Lock() + defer fw.fs.mu.Unlock() + if fw.closed { + return ErrClosed + } + fw.closed = true + fw.fs.open-- + err := fw.File.Close() + if err != nil { + fw.fs.log(fmt.Sprintf("close %s: %v", fw.fd, err)) + } + return err +} + +func fsGenName(fd FileDesc) string { + switch fd.Type { + case TypeManifest: + return fmt.Sprintf("MANIFEST-%06d", fd.Num) + case TypeJournal: + return fmt.Sprintf("%06d.log", fd.Num) + case TypeTable: + return fmt.Sprintf("%06d.ldb", fd.Num) + case TypeTemp: + return fmt.Sprintf("%06d.tmp", fd.Num) + default: + panic("invalid file type") + } +} + +func fsHasOldName(fd FileDesc) bool { + return fd.Type == TypeTable +} + +func fsGenOldName(fd FileDesc) string { + switch fd.Type { + case TypeTable: + return fmt.Sprintf("%06d.sst", fd.Num) + } + return fsGenName(fd) +} + +func fsParseName(name string) (fd FileDesc, ok bool) { + var tail string + _, err := fmt.Sscanf(name, "%d.%s", &fd.Num, &tail) + if err == nil { + switch tail { + case "log": + fd.Type = TypeJournal + case "ldb", "sst": + fd.Type = TypeTable + case "tmp": + fd.Type = TypeTemp + default: + return + } + return fd, true + } + n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fd.Num, &tail) + if n == 1 { + fd.Type = TypeManifest + return fd, true + } + return +} + +func fsParseNamePtr(name string, fd *FileDesc) bool { + _fd, ok := fsParseName(name) + if fd != nil { + *fd = _fd + } + return ok +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go new file mode 100644 index 00000000000..5545aeef2a8 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go @@ -0,0 +1,34 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// +build nacl + +package storage + +import ( + "os" + "syscall" +) + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + return nil, syscall.ENOTSUP +} + +func setFileLock(f *os.File, readOnly, lock bool) error { + return syscall.ENOTSUP +} + +func rename(oldpath, newpath string) error { + return syscall.ENOTSUP +} + +func isErrInvalid(err error) bool { + return false +} + +func syncDir(name string) error { + return syscall.ENOTSUP +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go new file mode 100644 index 00000000000..b8297980126 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go @@ -0,0 +1,63 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
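As a side note, the naming scheme implemented by fsGenName/fsParseName above is what FileDesc.String (defined in storage.go later in this diff) prints; a small illustrative sketch, with hypothetical descriptor numbers:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	fmt.Println(storage.FileDesc{Type: storage.TypeManifest, Num: 1}) // MANIFEST-000001
	fmt.Println(storage.FileDesc{Type: storage.TypeJournal, Num: 42}) // 000042.log
	fmt.Println(storage.FileDesc{Type: storage.TypeTable, Num: 7})    // 000007.ldb
	fmt.Println(storage.FileDesc{Type: storage.TypeTemp, Num: 3})     // 000003.tmp
}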
+ +package storage + +import ( + "os" +) + +type plan9FileLock struct { + f *os.File +} + +func (fl *plan9FileLock) release() error { + return fl.f.Close() +} + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var ( + flag int + perm os.FileMode + ) + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + perm = os.ModeExclusive + } + f, err := os.OpenFile(path, flag, perm) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, perm|0644) + } + if err != nil { + return + } + fl = &plan9FileLock{f: f} + return +} + +func rename(oldpath, newpath string) error { + if _, err := os.Stat(newpath); err == nil { + if err := os.Remove(newpath); err != nil { + return err + } + } + + return os.Rename(oldpath, newpath) +} + +func syncDir(name string) error { + f, err := os.Open(name) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go new file mode 100644 index 00000000000..79901ee4a7a --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go @@ -0,0 +1,81 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// +build solaris + +package storage + +import ( + "os" + "syscall" +) + +type unixFileLock struct { + f *os.File +} + +func (fl *unixFileLock) release() error { + if err := setFileLock(fl.f, false, false); err != nil { + return err + } + return fl.f.Close() +} + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var flag int + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + } + f, err := os.OpenFile(path, flag, 0) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, 0644) + } + if err != nil { + return + } + err = setFileLock(f, readOnly, true) + if err != nil { + f.Close() + return + } + fl = &unixFileLock{f: f} + return +} + +func setFileLock(f *os.File, readOnly, lock bool) error { + flock := syscall.Flock_t{ + Type: syscall.F_UNLCK, + Start: 0, + Len: 0, + Whence: 1, + } + if lock { + if readOnly { + flock.Type = syscall.F_RDLCK + } else { + flock.Type = syscall.F_WRLCK + } + } + return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock) +} + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func syncDir(name string) error { + f, err := os.Open(name) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go new file mode 100644 index 00000000000..d75f66a9efc --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go @@ -0,0 +1,98 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package storage + +import ( + "os" + "syscall" +) + +type unixFileLock struct { + f *os.File +} + +func (fl *unixFileLock) release() error { + if err := setFileLock(fl.f, false, false); err != nil { + return err + } + return fl.f.Close() +} + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var flag int + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + } + f, err := os.OpenFile(path, flag, 0) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, 0644) + } + if err != nil { + return + } + err = setFileLock(f, readOnly, true) + if err != nil { + f.Close() + return + } + fl = &unixFileLock{f: f} + return +} + +func setFileLock(f *os.File, readOnly, lock bool) error { + how := syscall.LOCK_UN + if lock { + if readOnly { + how = syscall.LOCK_SH + } else { + how = syscall.LOCK_EX + } + } + return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB) +} + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func isErrInvalid(err error) bool { + if err == os.ErrInvalid { + return true + } + // Go < 1.8 + if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL { + return true + } + // Go >= 1.8 returns *os.PathError instead + if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL { + return true + } + return false +} + +func syncDir(name string) error { + // As per fsync manpage, Linux seems to expect fsync on directory, however + // some system don't support this, so we will ignore syscall.EINVAL. + // + // From fsync(2): + // Calling fsync() does not necessarily ensure that the entry in the + // directory containing the file has also reached disk. For that an + // explicit fsync() on a file descriptor for the directory is also needed. + f, err := os.Open(name) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil && !isErrInvalid(err) { + return err + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go new file mode 100644 index 00000000000..899335fd7e4 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go @@ -0,0 +1,78 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
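As the OpenFile documentation earlier in this file notes, the LOCK file is held for the lifetime of the storage, so a second open of the same directory fails while the first is alive; a minimal sketch (the path is an assumption, behavior relies on the flock/fcntl locks implemented above):

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	s1, err := storage.OpenFile("/tmp/pd-demo", false)
	if err != nil {
		panic(err)
	}
	defer s1.Close()

	// The advisory lock acquired by s1 makes this second open fail.
	if _, err := storage.OpenFile("/tmp/pd-demo", false); err != nil {
		fmt.Println("second open rejected:", err)
	}
}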
+ +package storage + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procMoveFileExW = modkernel32.NewProc("MoveFileExW") +) + +const ( + _MOVEFILE_REPLACE_EXISTING = 1 +) + +type windowsFileLock struct { + fd syscall.Handle +} + +func (fl *windowsFileLock) release() error { + return syscall.Close(fl.fd) +} + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return + } + var access, shareMode uint32 + if readOnly { + access = syscall.GENERIC_READ + shareMode = syscall.FILE_SHARE_READ + } else { + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + fd, err := syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_EXISTING, syscall.FILE_ATTRIBUTE_NORMAL, 0) + if err == syscall.ERROR_FILE_NOT_FOUND { + fd, err = syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) + } + if err != nil { + return + } + fl = &windowsFileLock{fd: fd} + return +} + +func moveFileEx(from *uint16, to *uint16, flags uint32) error { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + return error(e1) + } + return syscall.EINVAL + } + return nil +} + +func rename(oldpath, newpath string) error { + from, err := syscall.UTF16PtrFromString(oldpath) + if err != nil { + return err + } + to, err := syscall.UTF16PtrFromString(newpath) + if err != nil { + return err + } + return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING) +} + +func syncDir(name string) error { return nil } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go new file mode 100644 index 00000000000..838f1bee1ba --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go @@ -0,0 +1,222 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package storage + +import ( + "bytes" + "os" + "sync" +) + +const typeShift = 4 + +// Verify at compile-time that typeShift is large enough to cover all FileType +// values by confirming that 0 == 0. +var _ [0]struct{} = [TypeAll >> typeShift]struct{}{} + +type memStorageLock struct { + ms *memStorage +} + +func (lock *memStorageLock) Unlock() { + ms := lock.ms + ms.mu.Lock() + defer ms.mu.Unlock() + if ms.slock == lock { + ms.slock = nil + } + return +} + +// memStorage is a memory-backed storage. +type memStorage struct { + mu sync.Mutex + slock *memStorageLock + files map[uint64]*memFile + meta FileDesc +} + +// NewMemStorage returns a new memory-backed storage implementation. 
+func NewMemStorage() Storage { + return &memStorage{ + files: make(map[uint64]*memFile), + } +} + +func (ms *memStorage) Lock() (Locker, error) { + ms.mu.Lock() + defer ms.mu.Unlock() + if ms.slock != nil { + return nil, ErrLocked + } + ms.slock = &memStorageLock{ms: ms} + return ms.slock, nil +} + +func (*memStorage) Log(str string) {} + +func (ms *memStorage) SetMeta(fd FileDesc) error { + if !FileDescOk(fd) { + return ErrInvalidFile + } + + ms.mu.Lock() + ms.meta = fd + ms.mu.Unlock() + return nil +} + +func (ms *memStorage) GetMeta() (FileDesc, error) { + ms.mu.Lock() + defer ms.mu.Unlock() + if ms.meta.Zero() { + return FileDesc{}, os.ErrNotExist + } + return ms.meta, nil +} + +func (ms *memStorage) List(ft FileType) ([]FileDesc, error) { + ms.mu.Lock() + var fds []FileDesc + for x := range ms.files { + fd := unpackFile(x) + if fd.Type&ft != 0 { + fds = append(fds, fd) + } + } + ms.mu.Unlock() + return fds, nil +} + +func (ms *memStorage) Open(fd FileDesc) (Reader, error) { + if !FileDescOk(fd) { + return nil, ErrInvalidFile + } + + ms.mu.Lock() + defer ms.mu.Unlock() + if m, exist := ms.files[packFile(fd)]; exist { + if m.open { + return nil, errFileOpen + } + m.open = true + return &memReader{Reader: bytes.NewReader(m.Bytes()), ms: ms, m: m}, nil + } + return nil, os.ErrNotExist +} + +func (ms *memStorage) Create(fd FileDesc) (Writer, error) { + if !FileDescOk(fd) { + return nil, ErrInvalidFile + } + + x := packFile(fd) + ms.mu.Lock() + defer ms.mu.Unlock() + m, exist := ms.files[x] + if exist { + if m.open { + return nil, errFileOpen + } + m.Reset() + } else { + m = &memFile{} + ms.files[x] = m + } + m.open = true + return &memWriter{memFile: m, ms: ms}, nil +} + +func (ms *memStorage) Remove(fd FileDesc) error { + if !FileDescOk(fd) { + return ErrInvalidFile + } + + x := packFile(fd) + ms.mu.Lock() + defer ms.mu.Unlock() + if _, exist := ms.files[x]; exist { + delete(ms.files, x) + return nil + } + return os.ErrNotExist +} + +func (ms *memStorage) Rename(oldfd, newfd FileDesc) error { + if !FileDescOk(oldfd) || !FileDescOk(newfd) { + return ErrInvalidFile + } + if oldfd == newfd { + return nil + } + + oldx := packFile(oldfd) + newx := packFile(newfd) + ms.mu.Lock() + defer ms.mu.Unlock() + oldm, exist := ms.files[oldx] + if !exist { + return os.ErrNotExist + } + newm, exist := ms.files[newx] + if (exist && newm.open) || oldm.open { + return errFileOpen + } + delete(ms.files, oldx) + ms.files[newx] = oldm + return nil +} + +func (*memStorage) Close() error { return nil } + +type memFile struct { + bytes.Buffer + open bool +} + +type memReader struct { + *bytes.Reader + ms *memStorage + m *memFile + closed bool +} + +func (mr *memReader) Close() error { + mr.ms.mu.Lock() + defer mr.ms.mu.Unlock() + if mr.closed { + return ErrClosed + } + mr.m.open = false + return nil +} + +type memWriter struct { + *memFile + ms *memStorage + closed bool +} + +func (*memWriter) Sync() error { return nil } + +func (mw *memWriter) Close() error { + mw.ms.mu.Lock() + defer mw.ms.mu.Unlock() + if mw.closed { + return ErrClosed + } + mw.memFile.open = false + return nil +} + +func packFile(fd FileDesc) uint64 { + return uint64(fd.Num)<<typeShift | uint64(fd.Type) +} + +func unpackFile(x uint64) FileDesc { + return FileDesc{FileType(x) & TypeAll, int64(x >> typeShift)} +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go new file mode 100644 index 00000000000..4e4a724258d --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go @@ -0,0 +1,187 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights
reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package storage provides storage abstraction for LevelDB. +package storage + +import ( + "errors" + "fmt" + "io" +) + +// FileType represent a file type. +type FileType int + +// File types. +const ( + TypeManifest FileType = 1 << iota + TypeJournal + TypeTable + TypeTemp + + TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp +) + +func (t FileType) String() string { + switch t { + case TypeManifest: + return "manifest" + case TypeJournal: + return "journal" + case TypeTable: + return "table" + case TypeTemp: + return "temp" + } + return fmt.Sprintf("<unknown:%d>", t) +} + +// Common error. +var ( + ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument") + ErrLocked = errors.New("leveldb/storage: already locked") + ErrClosed = errors.New("leveldb/storage: closed") +) + +// ErrCorrupted is the type that wraps errors that indicate corruption of +// a file. Package storage has its own type instead of using +// errors.ErrCorrupted to prevent circular import. +type ErrCorrupted struct { + Fd FileDesc + Err error +} + +func isCorrupted(err error) bool { + switch err.(type) { + case *ErrCorrupted: + return true + } + return false +} + +func (e *ErrCorrupted) Error() string { + if !e.Fd.Zero() { + return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd) + } + return e.Err.Error() +} + +// Syncer is the interface that wraps basic Sync method. +type Syncer interface { + // Sync commits the current contents of the file to stable storage. + Sync() error +} + +// Reader is the interface that groups the basic Read, Seek, ReadAt and Close +// methods. +type Reader interface { + io.ReadSeeker + io.ReaderAt + io.Closer +} + +// Writer is the interface that groups the basic Write, Sync and Close +// methods. +type Writer interface { + io.WriteCloser + Syncer +} + +// Locker is the interface that wraps Unlock method. +type Locker interface { + Unlock() +} + +// FileDesc is a 'file descriptor'. +type FileDesc struct { + Type FileType + Num int64 +} + +func (fd FileDesc) String() string { + switch fd.Type { + case TypeManifest: + return fmt.Sprintf("MANIFEST-%06d", fd.Num) + case TypeJournal: + return fmt.Sprintf("%06d.log", fd.Num) + case TypeTable: + return fmt.Sprintf("%06d.ldb", fd.Num) + case TypeTemp: + return fmt.Sprintf("%06d.tmp", fd.Num) + default: + return fmt.Sprintf("%#x-%d", fd.Type, fd.Num) + } +} + +// Zero returns true if fd == (FileDesc{}). +func (fd FileDesc) Zero() bool { + return fd == (FileDesc{}) +} + +// FileDescOk returns true if fd is a valid 'file descriptor'. +func FileDescOk(fd FileDesc) bool { + switch fd.Type { + case TypeManifest: + case TypeJournal: + case TypeTable: + case TypeTemp: + default: + return false + } + return fd.Num >= 0 +} + +// Storage is the storage. A storage instance must be safe for concurrent use. +type Storage interface { + // Lock locks the storage. Any subsequent attempt to call Lock will fail + // until the last lock released. + // Caller should call Unlock method after use. + Lock() (Locker, error) + + // Log logs a string. This is used for logging. + // An implementation may write to a file, stdout or simply do nothing. + Log(str string) + + // SetMeta store 'file descriptor' that can later be acquired using GetMeta + // method. The 'file descriptor' should point to a valid file. + // SetMeta should be implemented in such way that changes should happen + // atomically.
+ SetMeta(fd FileDesc) error + + // GetMeta returns 'file descriptor' stored in meta. The 'file descriptor' + // can be updated using SetMeta method. + // Returns os.ErrNotExist if meta doesn't store any 'file descriptor', or + // 'file descriptor' point to nonexistent file. + GetMeta() (FileDesc, error) + + // List returns file descriptors that match the given file types. + // The file types may be OR'ed together. + List(ft FileType) ([]FileDesc, error) + + // Open opens file with the given 'file descriptor' read-only. + // Returns os.ErrNotExist error if the file does not exist. + // Returns ErrClosed if the underlying storage is closed. + Open(fd FileDesc) (Reader, error) + + // Create creates file with the given 'file descriptor', truncate if already + // exist and opens write-only. + // Returns ErrClosed if the underlying storage is closed. + Create(fd FileDesc) (Writer, error) + + // Remove removes file with the given 'file descriptor'. + // Returns ErrClosed if the underlying storage is closed. + Remove(fd FileDesc) error + + // Rename renames file from oldfd to newfd. + // Returns ErrClosed if the underlying storage is closed. + Rename(oldfd, newfd FileDesc) error + + // Close closes the storage. + // It is valid to call Close multiple times. Other methods should not be + // called after the storage has been closed. + Close() error +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table.go new file mode 100644 index 00000000000..adf773f13f8 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table.go @@ -0,0 +1,529 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "sort" + "sync/atomic" + + "github.com/syndtr/goleveldb/leveldb/cache" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/table" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// tFile holds basic information about a table. +type tFile struct { + fd storage.FileDesc + seekLeft int32 + size int64 + imin, imax internalKey +} + +// Returns true if given key is after largest key of this table. +func (t *tFile) after(icmp *iComparer, ukey []byte) bool { + return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 +} + +// Returns true if given key is before smallest key of this table. +func (t *tFile) before(icmp *iComparer, ukey []byte) bool { + return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 +} + +// Returns true if given key range overlaps with this table key range. +func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { + return !t.after(icmp, umin) && !t.before(icmp, umax) +} + +// Cosumes one seek and return current seeks left. +func (t *tFile) consumeSeek() int32 { + return atomic.AddInt32(&t.seekLeft, -1) +} + +// Creates new tFile. +func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile { + f := &tFile{ + fd: fd, + size: size, + imin: imin, + imax: imax, + } + + // We arrange to automatically compact this file after + // a certain number of seeks. 
Let's assume: + // (1) One seek costs 10ms + // (2) Writing or reading 1MB costs 10ms (100MB/s) + // (3) A compaction of 1MB does 25MB of IO: + // 1MB read from this level + // 10-12MB read from next level (boundaries may be misaligned) + // 10-12MB written to next level + // This implies that 25 seeks cost the same as the compaction + // of 1MB of data. I.e., one seek costs approximately the + // same as the compaction of 40KB of data. We are a little + // conservative and allow approximately one seek for every 16KB + // of data before triggering a compaction. + f.seekLeft = int32(size / 16384) + if f.seekLeft < 100 { + f.seekLeft = 100 + } + + return f +} + +func tableFileFromRecord(r atRecord) *tFile { + return newTableFile(storage.FileDesc{storage.TypeTable, r.num}, r.size, r.imin, r.imax) +} + +// tFiles hold multiple tFile. +type tFiles []*tFile + +func (tf tFiles) Len() int { return len(tf) } +func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } + +func (tf tFiles) nums() string { + x := "[ " + for i, f := range tf { + if i != 0 { + x += ", " + } + x += fmt.Sprint(f.fd.Num) + } + x += " ]" + return x +} + +// Returns true if i smallest key is less than j. +// This used for sort by key in ascending order. +func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { + a, b := tf[i], tf[j] + n := icmp.Compare(a.imin, b.imin) + if n == 0 { + return a.fd.Num < b.fd.Num + } + return n < 0 +} + +// Returns true if i file number is greater than j. +// This used for sort by file number in descending order. +func (tf tFiles) lessByNum(i, j int) bool { + return tf[i].fd.Num > tf[j].fd.Num +} + +// Sorts tables by key in ascending order. +func (tf tFiles) sortByKey(icmp *iComparer) { + sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) +} + +// Sorts tables by file number in descending order. +func (tf tFiles) sortByNum() { + sort.Sort(&tFilesSortByNum{tFiles: tf}) +} + +// Returns sum of all tables size. +func (tf tFiles) size() (sum int64) { + for _, t := range tf { + sum += t.size + } + return sum +} + +// Searches smallest index of tables whose its smallest +// key is after or equal with given key. +func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int { + return sort.Search(len(tf), func(i int) bool { + return icmp.Compare(tf[i].imin, ikey) >= 0 + }) +} + +// Searches smallest index of tables whose its largest +// key is after or equal with given key. +func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int { + return sort.Search(len(tf), func(i int) bool { + return icmp.Compare(tf[i].imax, ikey) >= 0 + }) +} + +// Returns true if given key range overlaps with one or more +// tables key range. If unsorted is true then binary search will not be used. +func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { + if unsorted { + // Check against all files. + for _, t := range tf { + if t.overlaps(icmp, umin, umax) { + return true + } + } + return false + } + + i := 0 + if len(umin) > 0 { + // Find the earliest possible internal key for min. + i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek)) + } + if i >= len(tf) { + // Beginning of range is after all files, so no overlap. + return false + } + return !tf[i].before(icmp, umax) +} + +// Returns tables whose its key range overlaps with given key range. +// Range will be expanded if ukey found hop across tables. +// If overlapped is true then the search will be restarted if umax +// expanded. +// The dst content will be overwritten. 
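For concreteness, the seek allowance set in newTableFile above works out as follows; this is a standalone re-statement of that heuristic for illustration, not code used by the package:

package main

import "fmt"

// seekBudget mirrors the heuristic in newTableFile: one seek is allowed
// per 16 KiB of table data, with a floor of 100 seeks.
func seekBudget(size int64) int32 {
	n := int32(size / 16384)
	if n < 100 {
		n = 100
	}
	return n
}

func main() {
	fmt.Println(seekBudget(64 << 10)) // 100 (floor applies below ~1.6 MiB)
	fmt.Println(seekBudget(2 << 20))  // 128
	fmt.Println(seekBudget(32 << 20)) // 2048
}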
+func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { + dst = dst[:0] + for i := 0; i < len(tf); { + t := tf[i] + if t.overlaps(icmp, umin, umax) { + if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { + umin = t.imin.ukey() + dst = dst[:0] + i = 0 + continue + } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { + umax = t.imax.ukey() + // Restart search if it is overlapped. + if overlapped { + dst = dst[:0] + i = 0 + continue + } + } + + dst = append(dst, t) + } + i++ + } + + return dst +} + +// Returns tables key range. +func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) { + for i, t := range tf { + if i == 0 { + imin, imax = t.imin, t.imax + continue + } + if icmp.Compare(t.imin, imin) < 0 { + imin = t.imin + } + if icmp.Compare(t.imax, imax) > 0 { + imax = t.imax + } + } + + return +} + +// Creates iterator index from tables. +func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { + if slice != nil { + var start, limit int + if slice.Start != nil { + start = tf.searchMax(icmp, internalKey(slice.Start)) + } + if slice.Limit != nil { + limit = tf.searchMin(icmp, internalKey(slice.Limit)) + } else { + limit = tf.Len() + } + tf = tf[start:limit] + } + return iterator.NewArrayIndexer(&tFilesArrayIndexer{ + tFiles: tf, + tops: tops, + icmp: icmp, + slice: slice, + ro: ro, + }) +} + +// Tables iterator index. +type tFilesArrayIndexer struct { + tFiles + tops *tOps + icmp *iComparer + slice *util.Range + ro *opt.ReadOptions +} + +func (a *tFilesArrayIndexer) Search(key []byte) int { + return a.searchMax(a.icmp, internalKey(key)) +} + +func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { + if i == 0 || i == a.Len()-1 { + return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) + } + return a.tops.newIterator(a.tFiles[i], nil, a.ro) +} + +// Helper type for sortByKey. +type tFilesSortByKey struct { + tFiles + icmp *iComparer +} + +func (x *tFilesSortByKey) Less(i, j int) bool { + return x.lessByKey(x.icmp, i, j) +} + +// Helper type for sortByNum. +type tFilesSortByNum struct { + tFiles +} + +func (x *tFilesSortByNum) Less(i, j int) bool { + return x.lessByNum(i, j) +} + +// Table operations. +type tOps struct { + s *session + noSync bool + cache *cache.Cache + bcache *cache.Cache + bpool *util.BufferPool +} + +// Creates an empty table and returns table writer. +func (t *tOps) create() (*tWriter, error) { + fd := storage.FileDesc{storage.TypeTable, t.s.allocFileNum()} + fw, err := t.s.stor.Create(fd) + if err != nil { + return nil, err + } + return &tWriter{ + t: t, + fd: fd, + w: fw, + tw: table.NewWriter(fw, t.s.o.Options), + }, nil +} + +// Builds table from src iterator. +func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { + w, err := t.create() + if err != nil { + return + } + + defer func() { + if err != nil { + w.drop() + } + }() + + for src.Next() { + err = w.append(src.Key(), src.Value()) + if err != nil { + return + } + } + err = src.Error() + if err != nil { + return + } + + n = w.tw.EntriesLen() + f, err = w.finish() + return +} + +// Opens table. It returns a cache handle, which should +// be released after use. 
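The two caches held by tOps above are sized from the DB options consumed by the newTableOps constructor later in this file; a hedged sketch of how a caller could tune them when opening a database, assuming the OpenFilesCacheCapacity and BlockCacheCapacity fields of the vendored opt.Options and an illustrative path:

package main

import (
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// Illustrative capacities; newTableOps turns these into the
	// open-files cache and the block cache used by tOps.
	o := &opt.Options{
		OpenFilesCacheCapacity: 500,
		BlockCacheCapacity:     8 * opt.MiB,
	}
	db, err := leveldb.OpenFile("/tmp/pd-demo-db", o)
	if err != nil {
		panic(err)
	}
	defer db.Close()
}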
+func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) { + ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) { + var r storage.Reader + r, err = t.s.stor.Open(f.fd) + if err != nil { + return 0, nil + } + + var bcache *cache.NamespaceGetter + if t.bcache != nil { + bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)} + } + + var tr *table.Reader + tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options) + if err != nil { + r.Close() + return 0, nil + } + return 1, tr + + }) + if ch == nil && err == nil { + err = ErrClosed + } + return +} + +// Finds key/value pair whose key is greater than or equal to the +// given key. +func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { + ch, err := t.open(f) + if err != nil { + return nil, nil, err + } + defer ch.Release() + return ch.Value().(*table.Reader).Find(key, true, ro) +} + +// Finds key that is greater than or equal to the given key. +func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) { + ch, err := t.open(f) + if err != nil { + return nil, err + } + defer ch.Release() + return ch.Value().(*table.Reader).FindKey(key, true, ro) +} + +// Returns approximate offset of the given key. +func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) { + ch, err := t.open(f) + if err != nil { + return + } + defer ch.Release() + return ch.Value().(*table.Reader).OffsetOf(key) +} + +// Creates an iterator from the given table. +func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + ch, err := t.open(f) + if err != nil { + return iterator.NewEmptyIterator(err) + } + iter := ch.Value().(*table.Reader).NewIterator(slice, ro) + iter.SetReleaser(ch) + return iter +} + +// Removes table from persistent storage. It waits until +// no one use the the table. +func (t *tOps) remove(f *tFile) { + t.cache.Delete(0, uint64(f.fd.Num), func() { + if err := t.s.stor.Remove(f.fd); err != nil { + t.s.logf("table@remove removing @%d %q", f.fd.Num, err) + } else { + t.s.logf("table@remove removed @%d", f.fd.Num) + } + if t.bcache != nil { + t.bcache.EvictNS(uint64(f.fd.Num)) + } + }) +} + +// Closes the table ops instance. It will close all tables, +// regadless still used or not. +func (t *tOps) close() { + t.bpool.Close() + t.cache.Close() + if t.bcache != nil { + t.bcache.CloseWeak() + } +} + +// Creates new initialized table ops instance. +func newTableOps(s *session) *tOps { + var ( + cacher cache.Cacher + bcache *cache.Cache + bpool *util.BufferPool + ) + if s.o.GetOpenFilesCacheCapacity() > 0 { + cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity()) + } + if !s.o.GetDisableBlockCache() { + var bcacher cache.Cacher + if s.o.GetBlockCacheCapacity() > 0 { + bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity()) + } + bcache = cache.NewCache(bcacher) + } + if !s.o.GetDisableBufferPool() { + bpool = util.NewBufferPool(s.o.GetBlockSize() + 5) + } + return &tOps{ + s: s, + noSync: s.o.GetNoSync(), + cache: cache.NewCache(cacher), + bcache: bcache, + bpool: bpool, + } +} + +// tWriter wraps the table writer. It keep track of file descriptor +// and added key range. +type tWriter struct { + t *tOps + + fd storage.FileDesc + w storage.Writer + tw *table.Writer + + first, last []byte +} + +// Append key/value pair to the table. +func (w *tWriter) append(key, value []byte) error { + if w.first == nil { + w.first = append([]byte{}, key...) 
+ } + w.last = append(w.last[:0], key...) + return w.tw.Append(key, value) +} + +// Returns true if the table is empty. +func (w *tWriter) empty() bool { + return w.first == nil +} + +// Closes the storage.Writer. +func (w *tWriter) close() { + if w.w != nil { + w.w.Close() + w.w = nil + } +} + +// Finalizes the table and returns table file. +func (w *tWriter) finish() (f *tFile, err error) { + defer w.close() + err = w.tw.Close() + if err != nil { + return + } + if !w.t.noSync { + err = w.w.Sync() + if err != nil { + return + } + } + f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last)) + return +} + +// Drops the table. +func (w *tWriter) drop() { + w.close() + w.t.s.stor.Remove(w.fd) + w.t.s.reuseFileNum(w.fd.Num) + w.tw = nil + w.first = nil + w.last = nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go new file mode 100644 index 00000000000..16cfbaa0068 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go @@ -0,0 +1,1135 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package table + +import ( + "encoding/binary" + "fmt" + "io" + "sort" + "strings" + "sync" + + "github.com/golang/snappy" + + "github.com/syndtr/goleveldb/leveldb/cache" + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/filter" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// Reader errors. +var ( + ErrNotFound = errors.ErrNotFound + ErrReaderReleased = errors.New("leveldb/table: reader released") + ErrIterReleased = errors.New("leveldb/table: iterator released") +) + +// ErrCorrupted describes error due to corruption. This error will be wrapped +// with errors.ErrCorrupted. +type ErrCorrupted struct { + Pos int64 + Size int64 + Kind string + Reason string +} + +func (e *ErrCorrupted) Error() string { + return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason) +} + +func max(x, y int) int { + if x > y { + return x + } + return y +} + +type block struct { + bpool *util.BufferPool + bh blockHandle + data []byte + restartsLen int + restartsOffset int +} + +func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) { + index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { + offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) + offset++ // shared always zero, since this is a restart point + v1, n1 := binary.Uvarint(b.data[offset:]) // key length + _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length + m := offset + n1 + n2 + return cmp.Compare(b.data[m:m+int(v1)], key) > 0 + }) + rstart - 1 + if index < rstart { + // The smallest key is greater-than key sought. 
+ index = rstart + } + offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) + return +} + +func (b *block) restartIndex(rstart, rlimit, offset int) int { + return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { + return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset + }) + rstart - 1 +} + +func (b *block) restartOffset(index int) int { + return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) +} + +func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) { + if offset >= b.restartsOffset { + if offset != b.restartsOffset { + err = &ErrCorrupted{Reason: "entries offset not aligned"} + } + return + } + v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length + v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length + v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length + m := n0 + n1 + n2 + n = m + int(v1) + int(v2) + if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset { + err = &ErrCorrupted{Reason: "entries corrupted"} + return + } + key = b.data[offset+m : offset+m+int(v1)] + value = b.data[offset+m+int(v1) : offset+n] + nShared = int(v0) + return +} + +func (b *block) Release() { + b.bpool.Put(b.data) + b.bpool = nil + b.data = nil +} + +type dir int + +const ( + dirReleased dir = iota - 1 + dirSOI + dirEOI + dirBackward + dirForward +) + +type blockIter struct { + tr *Reader + block *block + blockReleaser util.Releaser + releaser util.Releaser + key, value []byte + offset int + // Previous offset, only filled by Next. + prevOffset int + prevNode []int + prevKeys []byte + restartIndex int + // Iterator direction. + dir dir + // Restart index slice range. + riStart int + riLimit int + // Offset slice range. + offsetStart int + offsetRealStart int + offsetLimit int + // Error. 
+ err error +} + +func (i *blockIter) sErr(err error) { + i.err = err + i.key = nil + i.value = nil + i.prevNode = nil + i.prevKeys = nil +} + +func (i *blockIter) reset() { + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.restartIndex = i.riStart + i.offset = i.offsetStart + i.dir = dirSOI + i.key = i.key[:0] + i.value = nil +} + +func (i *blockIter) isFirst() bool { + switch i.dir { + case dirForward: + return i.prevOffset == i.offsetRealStart + case dirBackward: + return len(i.prevNode) == 1 && i.restartIndex == i.riStart + } + return false +} + +func (i *blockIter) isLast() bool { + switch i.dir { + case dirForward, dirBackward: + return i.offset == i.offsetLimit + } + return false +} + +func (i *blockIter) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.dir = dirSOI + return i.Next() +} + +func (i *blockIter) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.dir = dirEOI + return i.Prev() +} + +func (i *blockIter) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key) + if err != nil { + i.sErr(err) + return false + } + i.restartIndex = ri + i.offset = max(i.offsetStart, offset) + if i.dir == dirSOI || i.dir == dirEOI { + i.dir = dirForward + } + for i.Next() { + if i.tr.cmp.Compare(i.key, key) >= 0 { + return true + } + } + return false +} + +func (i *blockIter) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirSOI { + i.restartIndex = i.riStart + i.offset = i.offsetStart + } else if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + for i.offset < i.offsetRealStart { + key, value, nShared, n, err := i.block.entry(i.offset) + if err != nil { + i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) + return false + } + if n == 0 { + i.dir = dirEOI + return false + } + i.key = append(i.key[:nShared], key...) + i.value = value + i.offset += n + } + if i.offset >= i.offsetLimit { + i.dir = dirEOI + if i.offset != i.offsetLimit { + i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) + } + return false + } + key, value, nShared, n, err := i.block.entry(i.offset) + if err != nil { + i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) + return false + } + if n == 0 { + i.dir = dirEOI + return false + } + i.key = append(i.key[:nShared], key...) + i.value = value + i.prevOffset = i.offset + i.offset += n + i.dir = dirForward + return true +} + +func (i *blockIter) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + var ri int + if i.dir == dirForward { + // Change direction. + i.offset = i.prevOffset + if i.offset == i.offsetRealStart { + i.dir = dirSOI + return false + } + ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset) + i.dir = dirBackward + } else if i.dir == dirEOI { + // At the end of iterator. 
+ i.restartIndex = i.riLimit + i.offset = i.offsetLimit + if i.offset == i.offsetRealStart { + i.dir = dirSOI + return false + } + ri = i.riLimit - 1 + i.dir = dirBackward + } else if len(i.prevNode) == 1 { + // This is the end of a restart range. + i.offset = i.prevNode[0] + i.prevNode = i.prevNode[:0] + if i.restartIndex == i.riStart { + i.dir = dirSOI + return false + } + i.restartIndex-- + ri = i.restartIndex + } else { + // In the middle of restart range, get from cache. + n := len(i.prevNode) - 3 + node := i.prevNode[n:] + i.prevNode = i.prevNode[:n] + // Get the key. + ko := node[0] + i.key = append(i.key[:0], i.prevKeys[ko:]...) + i.prevKeys = i.prevKeys[:ko] + // Get the value. + vo := node[1] + vl := vo + node[2] + i.value = i.block.data[vo:vl] + i.offset = vl + return true + } + // Build entries cache. + i.key = i.key[:0] + i.value = nil + offset := i.block.restartOffset(ri) + if offset == i.offset { + ri-- + if ri < 0 { + i.dir = dirSOI + return false + } + offset = i.block.restartOffset(ri) + } + i.prevNode = append(i.prevNode, offset) + for { + key, value, nShared, n, err := i.block.entry(offset) + if err != nil { + i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) + return false + } + if offset >= i.offsetRealStart { + if i.value != nil { + // Appends 3 variables: + // 1. Previous keys offset + // 2. Value offset in the data block + // 3. Value length + i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value)) + i.prevKeys = append(i.prevKeys, i.key...) + } + i.value = value + } + i.key = append(i.key[:nShared], key...) + offset += n + // Stop if target offset reached. + if offset >= i.offset { + if offset != i.offset { + i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) + return false + } + + break + } + } + i.restartIndex = ri + i.offset = offset + return true +} + +func (i *blockIter) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.key +} + +func (i *blockIter) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.value +} + +func (i *blockIter) Release() { + if i.dir != dirReleased { + i.tr = nil + i.block = nil + i.prevNode = nil + i.prevKeys = nil + i.key = nil + i.value = nil + i.dir = dirReleased + if i.blockReleaser != nil { + i.blockReleaser.Release() + i.blockReleaser = nil + } + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + } +} + +func (i *blockIter) SetReleaser(releaser util.Releaser) { + if i.dir == dirReleased { + panic(util.ErrReleased) + } + if i.releaser != nil && releaser != nil { + panic(util.ErrHasReleaser) + } + i.releaser = releaser +} + +func (i *blockIter) Valid() bool { + return i.err == nil && (i.dir == dirBackward || i.dir == dirForward) +} + +func (i *blockIter) Error() error { + return i.err +} + +type filterBlock struct { + bpool *util.BufferPool + data []byte + oOffset int + baseLg uint + filtersNum int +} + +func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool { + i := int(offset >> b.baseLg) + if i < b.filtersNum { + o := b.data[b.oOffset+i*4:] + n := int(binary.LittleEndian.Uint32(o)) + m := int(binary.LittleEndian.Uint32(o[4:])) + if n < m && m <= b.oOffset { + return filter.Contains(b.data[n:m], key) + } else if n == m { + return false + } + } + return true +} + +func (b *filterBlock) Release() { + b.bpool.Put(b.data) + b.bpool = nil + b.data = nil +} + +type indexIter struct { + *blockIter + tr *Reader + slice *util.Range + // Options + fillCache bool +} + +func (i 
*indexIter) Get() iterator.Iterator { + value := i.Value() + if value == nil { + return nil + } + dataBH, n := decodeBlockHandle(value) + if n == 0 { + return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle")) + } + + var slice *util.Range + if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) { + slice = i.slice + } + return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache) +} + +// Reader is a table reader. +type Reader struct { + mu sync.RWMutex + fd storage.FileDesc + reader io.ReaderAt + cache *cache.NamespaceGetter + err error + bpool *util.BufferPool + // Options + o *opt.Options + cmp comparer.Comparer + filter filter.Filter + verifyChecksum bool + + dataEnd int64 + metaBH, indexBH, filterBH blockHandle + indexBlock *block + filterBlock *filterBlock +} + +func (r *Reader) blockKind(bh blockHandle) string { + switch bh.offset { + case r.metaBH.offset: + return "meta-block" + case r.indexBH.offset: + return "index-block" + case r.filterBH.offset: + if r.filterBH.length > 0 { + return "filter-block" + } + } + return "data-block" +} + +func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error { + return &errors.ErrCorrupted{Fd: r.fd, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}} +} + +func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error { + return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason) +} + +func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error { + if cerr, ok := err.(*ErrCorrupted); ok { + cerr.Pos = int64(bh.offset) + cerr.Size = int64(bh.length) + cerr.Kind = r.blockKind(bh) + return &errors.ErrCorrupted{Fd: r.fd, Err: cerr} + } + return err +} + +func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) { + data := r.bpool.Get(int(bh.length + blockTrailerLen)) + if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF { + return nil, err + } + + if verifyChecksum { + n := bh.length + 1 + checksum0 := binary.LittleEndian.Uint32(data[n:]) + checksum1 := util.NewCRC(data[:n]).Value() + if checksum0 != checksum1 { + r.bpool.Put(data) + return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1)) + } + } + + switch data[bh.length] { + case blockTypeNoCompression: + data = data[:bh.length] + case blockTypeSnappyCompression: + decLen, err := snappy.DecodedLen(data[:bh.length]) + if err != nil { + r.bpool.Put(data) + return nil, r.newErrCorruptedBH(bh, err.Error()) + } + decData := r.bpool.Get(decLen) + decData, err = snappy.Decode(decData, data[:bh.length]) + r.bpool.Put(data) + if err != nil { + r.bpool.Put(decData) + return nil, r.newErrCorruptedBH(bh, err.Error()) + } + data = decData + default: + r.bpool.Put(data) + return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length])) + } + return data, nil +} + +func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) { + data, err := r.readRawBlock(bh, verifyChecksum) + if err != nil { + return nil, err + } + restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) + b := &block{ + bpool: r.bpool, + bh: bh, + data: data, + restartsLen: restartsLen, + restartsOffset: len(data) - (restartsLen+1)*4, + } + return b, nil +} + +func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) { + if r.cache != nil { + var ( + err error + ch 
*cache.Handle + ) + if fillCache { + ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { + var b *block + b, err = r.readBlock(bh, verifyChecksum) + if err != nil { + return 0, nil + } + return cap(b.data), b + }) + } else { + ch = r.cache.Get(bh.offset, nil) + } + if ch != nil { + b, ok := ch.Value().(*block) + if !ok { + ch.Release() + return nil, nil, errors.New("leveldb/table: inconsistent block type") + } + return b, ch, err + } else if err != nil { + return nil, nil, err + } + } + + b, err := r.readBlock(bh, verifyChecksum) + return b, b, err +} + +func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) { + data, err := r.readRawBlock(bh, true) + if err != nil { + return nil, err + } + n := len(data) + if n < 5 { + return nil, r.newErrCorruptedBH(bh, "too short") + } + m := n - 5 + oOffset := int(binary.LittleEndian.Uint32(data[m:])) + if oOffset > m { + return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset") + } + b := &filterBlock{ + bpool: r.bpool, + data: data, + oOffset: oOffset, + baseLg: uint(data[n-1]), + filtersNum: (m - oOffset) / 4, + } + return b, nil +} + +func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) { + if r.cache != nil { + var ( + err error + ch *cache.Handle + ) + if fillCache { + ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { + var b *filterBlock + b, err = r.readFilterBlock(bh) + if err != nil { + return 0, nil + } + return cap(b.data), b + }) + } else { + ch = r.cache.Get(bh.offset, nil) + } + if ch != nil { + b, ok := ch.Value().(*filterBlock) + if !ok { + ch.Release() + return nil, nil, errors.New("leveldb/table: inconsistent block type") + } + return b, ch, err + } else if err != nil { + return nil, nil, err + } + } + + b, err := r.readFilterBlock(bh) + return b, b, err +} + +func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) { + if r.indexBlock == nil { + return r.readBlockCached(r.indexBH, true, fillCache) + } + return r.indexBlock, util.NoopReleaser{}, nil +} + +func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) { + if r.filterBlock == nil { + return r.readFilterBlockCached(r.filterBH, fillCache) + } + return r.filterBlock, util.NoopReleaser{}, nil +} + +func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter { + bi := &blockIter{ + tr: r, + block: b, + blockReleaser: bReleaser, + // Valid key should never be nil. 
+ key: make([]byte, 0), + dir: dirSOI, + riStart: 0, + riLimit: b.restartsLen, + offsetStart: 0, + offsetRealStart: 0, + offsetLimit: b.restartsOffset, + } + if slice != nil { + if slice.Start != nil { + if bi.Seek(slice.Start) { + bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset) + bi.offsetStart = b.restartOffset(bi.riStart) + bi.offsetRealStart = bi.prevOffset + } else { + bi.riStart = b.restartsLen + bi.offsetStart = b.restartsOffset + bi.offsetRealStart = b.restartsOffset + } + } + if slice.Limit != nil { + if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) { + bi.offsetLimit = bi.prevOffset + bi.riLimit = bi.restartIndex + 1 + } + } + bi.reset() + if bi.offsetStart > bi.offsetLimit { + bi.sErr(errors.New("leveldb/table: invalid slice range")) + } + } + return bi +} + +func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { + b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache) + if err != nil { + return iterator.NewEmptyIterator(err) + } + return r.newBlockIter(b, rel, slice, false) +} + +func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + return iterator.NewEmptyIterator(r.err) + } + + return r.getDataIter(dataBH, slice, verifyChecksum, fillCache) +} + +// NewIterator creates an iterator from the table. +// +// Slice allows slicing the iterator to only contains keys in the given +// range. A nil Range.Start is treated as a key before all keys in the +// table. And a nil Range.Limit is treated as a key after all keys in +// the table. +// +// The returned iterator is not safe for concurrent use and should be released +// after use. +// +// Also read Iterator documentation of the leveldb/iterator package. +func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + return iterator.NewEmptyIterator(r.err) + } + + fillCache := !ro.GetDontFillCache() + indexBlock, rel, err := r.getIndexBlock(fillCache) + if err != nil { + return iterator.NewEmptyIterator(err) + } + index := &indexIter{ + blockIter: r.newBlockIter(indexBlock, rel, slice, true), + tr: r, + slice: slice, + fillCache: !ro.GetDontFillCache(), + } + return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader)) +} + +func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + err = r.err + return + } + + indexBlock, rel, err := r.getIndexBlock(true) + if err != nil { + return + } + defer rel.Release() + + index := r.newBlockIter(indexBlock, nil, nil, true) + defer index.Release() + + if !index.Seek(key) { + if err = index.Error(); err == nil { + err = ErrNotFound + } + return + } + + dataBH, n := decodeBlockHandle(index.Value()) + if n == 0 { + r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") + return nil, nil, r.err + } + + // The filter should only used for exact match. 
+ if filtered && r.filter != nil { + filterBlock, frel, ferr := r.getFilterBlock(true) + if ferr == nil { + if !filterBlock.contains(r.filter, dataBH.offset, key) { + frel.Release() + return nil, nil, ErrNotFound + } + frel.Release() + } else if !errors.IsCorrupted(ferr) { + return nil, nil, ferr + } + } + + data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache()) + if !data.Seek(key) { + data.Release() + if err = data.Error(); err != nil { + return + } + + // The nearest greater-than key is the first key of the next block. + if !index.Next() { + if err = index.Error(); err == nil { + err = ErrNotFound + } + return + } + + dataBH, n = decodeBlockHandle(index.Value()) + if n == 0 { + r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") + return nil, nil, r.err + } + + data = r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache()) + if !data.Next() { + data.Release() + if err = data.Error(); err == nil { + err = ErrNotFound + } + return + } + } + + // Key doesn't use block buffer, no need to copy the buffer. + rkey = data.Key() + if !noValue { + if r.bpool == nil { + value = data.Value() + } else { + // Value does use block buffer, and since the buffer will be + // recycled, it need to be copied. + value = append([]byte{}, data.Value()...) + } + } + data.Release() + return +} + +// Find finds key/value pair whose key is greater than or equal to the +// given key. It returns ErrNotFound if the table doesn't contain +// such pair. +// If filtered is true then the nearest 'block' will be checked against +// 'filter data' (if present) and will immediately return ErrNotFound if +// 'filter data' indicates that such pair doesn't exist. +// +// The caller may modify the contents of the returned slice as it is its +// own copy. +// It is safe to modify the contents of the argument after Find returns. +func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) { + return r.find(key, filtered, ro, false) +} + +// FindKey finds key that is greater than or equal to the given key. +// It returns ErrNotFound if the table doesn't contain such key. +// If filtered is true then the nearest 'block' will be checked against +// 'filter data' (if present) and will immediately return ErrNotFound if +// 'filter data' indicates that such key doesn't exist. +// +// The caller may modify the contents of the returned slice as it is its +// own copy. +// It is safe to modify the contents of the argument after Find returns. +func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) { + rkey, _, err = r.find(key, filtered, ro, true) + return +} + +// Get gets the value for the given key. It returns errors.ErrNotFound +// if the table does not contain the key. +// +// The caller may modify the contents of the returned slice as it is its +// own copy. +// It is safe to modify the contents of the argument after Find returns. +func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + err = r.err + return + } + + rkey, value, err := r.find(key, false, ro, false) + if err == nil && r.cmp.Compare(rkey, key) != 0 { + value = nil + err = ErrNotFound + } + return +} + +// OffsetOf returns approximate offset for the given key. +// +// It is safe to modify the contents of the argument after Get returns. 
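// Illustrative sketch (editorial, not from the upstream goleveldb sources): how a
// caller might combine NewReader and Get, the API documented above. It assumes the
// table was produced by table.Writer and that f, size and fd come from the storage
// layer; passing nil for cache, bpool and the option structs is assumed to fall
// back to the package defaults.
func exampleReaderGet(f io.ReaderAt, size int64, fd storage.FileDesc, key []byte) ([]byte, error) {
	r, err := NewReader(f, size, fd, nil, nil, nil)
	if err != nil {
		return nil, err
	}
	// Release closes f if it implements io.Closer and drops any locally cached blocks.
	defer r.Release()
	// Get returns ErrNotFound when the table has no exact match for key.
	return r.Get(key, nil)
}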
+func (r *Reader) OffsetOf(key []byte) (offset int64, err error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + err = r.err + return + } + + indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true) + if err != nil { + return + } + defer rel.Release() + + index := r.newBlockIter(indexBlock, nil, nil, true) + defer index.Release() + if index.Seek(key) { + dataBH, n := decodeBlockHandle(index.Value()) + if n == 0 { + r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") + return + } + offset = int64(dataBH.offset) + return + } + err = index.Error() + if err == nil { + offset = r.dataEnd + } + return +} + +// Release implements util.Releaser. +// It also close the file if it is an io.Closer. +func (r *Reader) Release() { + r.mu.Lock() + defer r.mu.Unlock() + + if closer, ok := r.reader.(io.Closer); ok { + closer.Close() + } + if r.indexBlock != nil { + r.indexBlock.Release() + r.indexBlock = nil + } + if r.filterBlock != nil { + r.filterBlock.Release() + r.filterBlock = nil + } + r.reader = nil + r.cache = nil + r.bpool = nil + r.err = ErrReaderReleased +} + +// NewReader creates a new initialized table reader for the file. +// The fi, cache and bpool is optional and can be nil. +// +// The returned table reader instance is safe for concurrent use. +func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.NamespaceGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) { + if f == nil { + return nil, errors.New("leveldb/table: nil file") + } + + r := &Reader{ + fd: fd, + reader: f, + cache: cache, + bpool: bpool, + o: o, + cmp: o.GetComparer(), + verifyChecksum: o.GetStrict(opt.StrictBlockChecksum), + } + + if size < footerLen { + r.err = r.newErrCorrupted(0, size, "table", "too small") + return r, nil + } + + footerPos := size - footerLen + var footer [footerLen]byte + if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF { + return nil, err + } + if string(footer[footerLen-len(magic):footerLen]) != magic { + r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number") + return r, nil + } + + var n int + // Decode the metaindex block handle. + r.metaBH, n = decodeBlockHandle(footer[:]) + if n == 0 { + r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle") + return r, nil + } + + // Decode the index block handle. + r.indexBH, n = decodeBlockHandle(footer[n:]) + if n == 0 { + r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle") + return r, nil + } + + // Read metaindex block. + metaBlock, err := r.readBlock(r.metaBH, true) + if err != nil { + if errors.IsCorrupted(err) { + r.err = err + return r, nil + } + return nil, err + } + + // Set data end. + r.dataEnd = int64(r.metaBH.offset) + + // Read metaindex. + metaIter := r.newBlockIter(metaBlock, nil, nil, true) + for metaIter.Next() { + key := string(metaIter.Key()) + if !strings.HasPrefix(key, "filter.") { + continue + } + fn := key[7:] + if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn { + r.filter = f0 + } else { + for _, f0 := range o.GetAltFilters() { + if f0.Name() == fn { + r.filter = f0 + break + } + } + } + if r.filter != nil { + filterBH, n := decodeBlockHandle(metaIter.Value()) + if n == 0 { + continue + } + r.filterBH = filterBH + // Update data end. + r.dataEnd = int64(filterBH.offset) + break + } + } + metaIter.Release() + metaBlock.Release() + + // Cache index and filter block locally, since we don't have global cache. 
+ if cache == nil { + r.indexBlock, err = r.readBlock(r.indexBH, true) + if err != nil { + if errors.IsCorrupted(err) { + r.err = err + return r, nil + } + return nil, err + } + if r.filter != nil { + r.filterBlock, err = r.readFilterBlock(r.filterBH) + if err != nil { + if !errors.IsCorrupted(err) { + return nil, err + } + + // Don't use filter then. + r.filter = nil + } + } + } + + return r, nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go new file mode 100644 index 00000000000..beacdc1f024 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go @@ -0,0 +1,177 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package table allows read and write sorted key/value. +package table + +import ( + "encoding/binary" +) + +/* +Table: + +Table is consist of one or more data blocks, an optional filter block +a metaindex block, an index block and a table footer. Metaindex block +is a special block used to keep parameters of the table, such as filter +block name and its block handle. Index block is a special block used to +keep record of data blocks offset and length, index block use one as +restart interval. The key used by index block are the last key of preceding +block, shorter separator of adjacent blocks or shorter successor of the +last key of the last block. Filter block is an optional block contains +sequence of filter data generated by a filter generator. + +Table data structure: + + optional + / + +--------------+--------------+--------------+------+-------+-----------------+-------------+--------+ + | data block 1 | ... | data block n | filter block | metaindex block | index block | footer | + +--------------+--------------+--------------+--------------+-----------------+-------------+--------+ + + Each block followed by a 5-bytes trailer contains compression type and checksum. + +Table block trailer: + + +---------------------------+-------------------+ + | compression type (1-byte) | checksum (4-byte) | + +---------------------------+-------------------+ + + The checksum is a CRC-32 computed using Castagnoli's polynomial. Compression + type also included in the checksum. + +Table footer: + + +------------------- 40-bytes -------------------+ + / \ + +------------------------+--------------------+------+-----------------+ + | metaindex block handle / index block handle / ---- | magic (8-bytes) | + +------------------------+--------------------+------+-----------------+ + + The magic are first 64-bit of SHA-1 sum of "http://code.google.com/p/leveldb/". + +NOTE: All fixed-length integer are little-endian. +*/ + +/* +Block: + +Block is consist of one or more key/value entries and a block trailer. +Block entry shares key prefix with its preceding key until a restart +point reached. A block should contains at least one restart point. +First restart point are always zero. + +Block data structure: + + + restart point + restart point (depends on restart interval) + / / + +---------------+---------------+---------------+---------------+---------+ + | block entry 1 | block entry 2 | ... 
| block entry n | trailer | + +---------------+---------------+---------------+---------------+---------+ + +Key/value entry: + + +---- key len ----+ + / \ + +-------+---------+-----------+---------+--------------------+--------------+----------------+ + | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) | + +-----------------+---------------------+--------------------+--------------+----------------+ + + Block entry shares key prefix with its preceding key: + Conditions: + restart_interval=2 + entry one : key=deck,value=v1 + entry two : key=dock,value=v2 + entry three: key=duck,value=v3 + The entries will be encoded as follow: + + + restart point (offset=0) + restart point (offset=16) + / / + +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ + | 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" | + +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ + \ / \ / \ / + +----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+ + + The block trailer will contains two restart points: + + +------------+-----------+--------+ + | 0 | 16 | 2 | + +------------+-----------+---+----+ + \ / \ + +-- restart points --+ + restart points length + +Block trailer: + + +-- 4-bytes --+ + / \ + +-----------------+-----------------+-----------------+------------------------------+ + | restart point 1 | .... | restart point n | restart points len (4-bytes) | + +-----------------+-----------------+-----------------+------------------------------+ + + +NOTE: All fixed-length integer are little-endian. +*/ + +/* +Filter block: + +Filter block consist of one or more filter data and a filter block trailer. +The trailer contains filter data offsets, a trailer offset and a 1-byte base Lg. + +Filter block data structure: + + + offset 1 + offset 2 + offset n + trailer offset + / / / / + +---------------+---------------+---------------+---------+ + | filter data 1 | ... | filter data n | trailer | + +---------------+---------------+---------------+---------+ + +Filter block trailer: + + +- 4-bytes -+ + / \ + +---------------+---------------+---------------+-------------------------------+------------------+ + | data 1 offset | .... | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) | + +-------------- +---------------+---------------+-------------------------------+------------------+ + + +NOTE: All fixed-length integer are little-endian. +*/ + +const ( + blockTrailerLen = 5 + footerLen = 48 + + magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb" + + // The block type gives the per-block compression format. + // These constants are part of the file format and should not be changed. 
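	// Illustrative note (editorial, not upstream commentary): a block handle is two
	// uvarints, offset then length. For example, decodeBlockHandle on the bytes
	// {0xAC, 0x02, 0xE8, 0x07} yields blockHandle{offset: 300, length: 1000} and
	// n = 4; encodeBlockHandle performs the inverse and never needs more than
	// 2*binary.MaxVarintLen64 = 20 bytes.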
+ blockTypeNoCompression = 0 + blockTypeSnappyCompression = 1 + + // Generate new filter every 2KB of data + filterBaseLg = 11 + filterBase = 1 << filterBaseLg +) + +type blockHandle struct { + offset, length uint64 +} + +func decodeBlockHandle(src []byte) (blockHandle, int) { + offset, n := binary.Uvarint(src) + length, m := binary.Uvarint(src[n:]) + if n == 0 || m == 0 { + return blockHandle{}, 0 + } + return blockHandle{offset, length}, n + m +} + +func encodeBlockHandle(dst []byte, b blockHandle) int { + n := binary.PutUvarint(dst, b.offset) + m := binary.PutUvarint(dst[n:], b.length) + return n + m +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go new file mode 100644 index 00000000000..b96b271d8dd --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go @@ -0,0 +1,375 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package table + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/golang/snappy" + + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/filter" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +func sharedPrefixLen(a, b []byte) int { + i, n := 0, len(a) + if n > len(b) { + n = len(b) + } + for i < n && a[i] == b[i] { + i++ + } + return i +} + +type blockWriter struct { + restartInterval int + buf util.Buffer + nEntries int + prevKey []byte + restarts []uint32 + scratch []byte +} + +func (w *blockWriter) append(key, value []byte) { + nShared := 0 + if w.nEntries%w.restartInterval == 0 { + w.restarts = append(w.restarts, uint32(w.buf.Len())) + } else { + nShared = sharedPrefixLen(w.prevKey, key) + } + n := binary.PutUvarint(w.scratch[0:], uint64(nShared)) + n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared)) + n += binary.PutUvarint(w.scratch[n:], uint64(len(value))) + w.buf.Write(w.scratch[:n]) + w.buf.Write(key[nShared:]) + w.buf.Write(value) + w.prevKey = append(w.prevKey[:0], key...) + w.nEntries++ +} + +func (w *blockWriter) finish() { + // Write restarts entry. + if w.nEntries == 0 { + // Must have at least one restart entry. + w.restarts = append(w.restarts, 0) + } + w.restarts = append(w.restarts, uint32(len(w.restarts))) + for _, x := range w.restarts { + buf4 := w.buf.Alloc(4) + binary.LittleEndian.PutUint32(buf4, x) + } +} + +func (w *blockWriter) reset() { + w.buf.Reset() + w.nEntries = 0 + w.restarts = w.restarts[:0] +} + +func (w *blockWriter) bytesLen() int { + restartsLen := len(w.restarts) + if restartsLen == 0 { + restartsLen = 1 + } + return w.buf.Len() + 4*restartsLen + 4 +} + +type filterWriter struct { + generator filter.FilterGenerator + buf util.Buffer + nKeys int + offsets []uint32 +} + +func (w *filterWriter) add(key []byte) { + if w.generator == nil { + return + } + w.generator.Add(key) + w.nKeys++ +} + +func (w *filterWriter) flush(offset uint64) { + if w.generator == nil { + return + } + for x := int(offset / filterBase); x > len(w.offsets); { + w.generate() + } +} + +func (w *filterWriter) finish() { + if w.generator == nil { + return + } + // Generate last keys. 
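	// Editorial aside (not upstream commentary): flush(offset) above emits one filter
	// per filterBase (2 KiB, baseLg = 11) of data-block offset; e.g. a data block
	// starting at offset 5000 maps to 5000/2048 = 2, so filters are generated until
	// len(w.offsets) reaches 2. finish() then writes the filter block trailer
	// described in table.go: every entry of w.offsets as a 4-byte little-endian
	// value, followed by the 1-byte base Lg.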
+ + if w.nKeys > 0 { + w.generate() + } + w.offsets = append(w.offsets, uint32(w.buf.Len())) + for _, x := range w.offsets { + buf4 := w.buf.Alloc(4) + binary.LittleEndian.PutUint32(buf4, x) + } + w.buf.WriteByte(filterBaseLg) +} + +func (w *filterWriter) generate() { + // Record offset. + w.offsets = append(w.offsets, uint32(w.buf.Len())) + // Generate filters. + if w.nKeys > 0 { + w.generator.Generate(&w.buf) + w.nKeys = 0 + } +} + +// Writer is a table writer. +type Writer struct { + writer io.Writer + err error + // Options + cmp comparer.Comparer + filter filter.Filter + compression opt.Compression + blockSize int + + dataBlock blockWriter + indexBlock blockWriter + filterBlock filterWriter + pendingBH blockHandle + offset uint64 + nEntries int + // Scratch allocated enough for 5 uvarint. Block writer should not use + // first 20-bytes since it will be used to encode block handle, which + // then passed to the block writer itself. + scratch [50]byte + comparerScratch []byte + compressionScratch []byte +} + +func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) { + // Compress the buffer if necessary. + var b []byte + if compression == opt.SnappyCompression { + // Allocate scratch enough for compression and block trailer. + if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n { + w.compressionScratch = make([]byte, n) + } + compressed := snappy.Encode(w.compressionScratch, buf.Bytes()) + n := len(compressed) + b = compressed[:n+blockTrailerLen] + b[n] = blockTypeSnappyCompression + } else { + tmp := buf.Alloc(blockTrailerLen) + tmp[0] = blockTypeNoCompression + b = buf.Bytes() + } + + // Calculate the checksum. + n := len(b) - 4 + checksum := util.NewCRC(b[:n]).Value() + binary.LittleEndian.PutUint32(b[n:], checksum) + + // Write the buffer to the file. + _, err = w.writer.Write(b) + if err != nil { + return + } + bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)} + w.offset += uint64(len(b)) + return +} + +func (w *Writer) flushPendingBH(key []byte) { + if w.pendingBH.length == 0 { + return + } + var separator []byte + if len(key) == 0 { + separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey) + } else { + separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key) + } + if separator == nil { + separator = w.dataBlock.prevKey + } else { + w.comparerScratch = separator + } + n := encodeBlockHandle(w.scratch[:20], w.pendingBH) + // Append the block handle to the index block. + w.indexBlock.append(separator, w.scratch[:n]) + // Reset prev key of the data block. + w.dataBlock.prevKey = w.dataBlock.prevKey[:0] + // Clear pending block handle. + w.pendingBH = blockHandle{} +} + +func (w *Writer) finishBlock() error { + w.dataBlock.finish() + bh, err := w.writeBlock(&w.dataBlock.buf, w.compression) + if err != nil { + return err + } + w.pendingBH = bh + // Reset the data block. + w.dataBlock.reset() + // Flush the filter block. + w.filterBlock.flush(w.offset) + return nil +} + +// Append appends key/value pair to the table. The keys passed must +// be in increasing order. +// +// It is safe to modify the contents of the arguments after Append returns. 
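// Illustrative sketch (editorial, not from the upstream goleveldb sources): minimal
// use of the Writer API documented above. Keys are appended in increasing order and
// Close finalizes the data, filter, metaindex and index blocks plus the footer.
// A nil *opt.Options is assumed to fall back to the package defaults.
func exampleWriteTable(f io.Writer) error {
	w := NewWriter(f, nil)
	for _, kv := range [][2]string{{"apple", "1"}, {"banana", "2"}, {"cherry", "3"}} {
		if err := w.Append([]byte(kv[0]), []byte(kv[1])); err != nil {
			return err
		}
	}
	return w.Close()
}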
+func (w *Writer) Append(key, value []byte) error { + if w.err != nil { + return w.err + } + if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 { + w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key) + return w.err + } + + w.flushPendingBH(key) + // Append key/value pair to the data block. + w.dataBlock.append(key, value) + // Add key to the filter block. + w.filterBlock.add(key) + + // Finish the data block if block size target reached. + if w.dataBlock.bytesLen() >= w.blockSize { + if err := w.finishBlock(); err != nil { + w.err = err + return w.err + } + } + w.nEntries++ + return nil +} + +// BlocksLen returns number of blocks written so far. +func (w *Writer) BlocksLen() int { + n := w.indexBlock.nEntries + if w.pendingBH.length > 0 { + // Includes the pending block. + n++ + } + return n +} + +// EntriesLen returns number of entries added so far. +func (w *Writer) EntriesLen() int { + return w.nEntries +} + +// BytesLen returns number of bytes written so far. +func (w *Writer) BytesLen() int { + return int(w.offset) +} + +// Close will finalize the table. Calling Append is not possible +// after Close, but calling BlocksLen, EntriesLen and BytesLen +// is still possible. +func (w *Writer) Close() error { + if w.err != nil { + return w.err + } + + // Write the last data block. Or empty data block if there + // aren't any data blocks at all. + if w.dataBlock.nEntries > 0 || w.nEntries == 0 { + if err := w.finishBlock(); err != nil { + w.err = err + return w.err + } + } + w.flushPendingBH(nil) + + // Write the filter block. + var filterBH blockHandle + w.filterBlock.finish() + if buf := &w.filterBlock.buf; buf.Len() > 0 { + filterBH, w.err = w.writeBlock(buf, opt.NoCompression) + if w.err != nil { + return w.err + } + } + + // Write the metaindex block. + if filterBH.length > 0 { + key := []byte("filter." + w.filter.Name()) + n := encodeBlockHandle(w.scratch[:20], filterBH) + w.dataBlock.append(key, w.scratch[:n]) + } + w.dataBlock.finish() + metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression) + if err != nil { + w.err = err + return w.err + } + + // Write the index block. + w.indexBlock.finish() + indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression) + if err != nil { + w.err = err + return w.err + } + + // Write the table footer. + footer := w.scratch[:footerLen] + for i := range footer { + footer[i] = 0 + } + n := encodeBlockHandle(footer, metaindexBH) + encodeBlockHandle(footer[n:], indexBH) + copy(footer[footerLen-len(magic):], magic) + if _, err := w.writer.Write(footer); err != nil { + w.err = err + return w.err + } + w.offset += footerLen + + w.err = errors.New("leveldb/table: writer is closed") + return nil +} + +// NewWriter creates a new initialized table writer for the file. +// +// Table writer is not safe for concurrent use. +func NewWriter(f io.Writer, o *opt.Options) *Writer { + w := &Writer{ + writer: f, + cmp: o.GetComparer(), + filter: o.GetFilter(), + compression: o.GetCompression(), + blockSize: o.GetBlockSize(), + comparerScratch: make([]byte, 0), + } + // data block + w.dataBlock.restartInterval = o.GetBlockRestartInterval() + // The first 20-bytes are used for encoding block handle. 
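	// (Editorial note: 20 bytes suffice because flushPendingBH and Close encode a
	// block handle as two uvarint64 values of at most binary.MaxVarintLen64 = 10
	// bytes each via encodeBlockHandle(w.scratch[:20], ...).)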
+ w.dataBlock.scratch = w.scratch[20:] + // index block + w.indexBlock.restartInterval = 1 + w.indexBlock.scratch = w.scratch[20:] + // filter block + if w.filter != nil { + w.filterBlock.generator = w.filter.NewGenerator() + w.filterBlock.flush(0) + } + return w +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util.go new file mode 100644 index 00000000000..0e2b519e5c7 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util.go @@ -0,0 +1,98 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "sort" + + "github.com/syndtr/goleveldb/leveldb/storage" +) + +func shorten(str string) string { + if len(str) <= 8 { + return str + } + return str[:3] + ".." + str[len(str)-3:] +} + +var bunits = [...]string{"", "Ki", "Mi", "Gi", "Ti"} + +func shortenb(bytes int) string { + i := 0 + for ; bytes > 1024 && i < 4; i++ { + bytes /= 1024 + } + return fmt.Sprintf("%d%sB", bytes, bunits[i]) +} + +func sshortenb(bytes int) string { + if bytes == 0 { + return "~" + } + sign := "+" + if bytes < 0 { + sign = "-" + bytes *= -1 + } + i := 0 + for ; bytes > 1024 && i < 4; i++ { + bytes /= 1024 + } + return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i]) +} + +func sint(x int) string { + if x == 0 { + return "~" + } + sign := "+" + if x < 0 { + sign = "-" + x *= -1 + } + return fmt.Sprintf("%s%d", sign, x) +} + +func minInt(a, b int) int { + if a < b { + return a + } + return b +} + +func maxInt(a, b int) int { + if a > b { + return a + } + return b +} + +type fdSorter []storage.FileDesc + +func (p fdSorter) Len() int { + return len(p) +} + +func (p fdSorter) Less(i, j int) bool { + return p[i].Num < p[j].Num +} + +func (p fdSorter) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +func sortFds(fds []storage.FileDesc) { + sort.Sort(fdSorter(fds)) +} + +func ensureBuffer(b []byte, n int) []byte { + if cap(b) < n { + return make([]byte, n) + } + return b[:n] +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go new file mode 100644 index 00000000000..21de242552d --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go @@ -0,0 +1,293 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package util + +// This a copy of Go std bytes.Buffer with some modification +// and some features stripped. + +import ( + "bytes" + "io" +) + +// A Buffer is a variable-sized buffer of bytes with Read and Write methods. +// The zero value for Buffer is an empty buffer ready to use. +type Buffer struct { + buf []byte // contents are the bytes buf[off : len(buf)] + off int // read at &buf[off], write at &buf[len(buf)] + bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. +} + +// Bytes returns a slice of the contents of the unread portion of the buffer; +// len(b.Bytes()) == b.Len(). If the caller changes the contents of the +// returned slice, the contents of the buffer will change provided there +// are no intervening method calls on the Buffer. +func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } + +// String returns the contents of the unread portion of the buffer +// as a string. If the Buffer is a nil pointer, it returns "". 
+func (b *Buffer) String() string { + if b == nil { + // Special case, useful in debugging. + return "" + } + return string(b.buf[b.off:]) +} + +// Len returns the number of bytes of the unread portion of the buffer; +// b.Len() == len(b.Bytes()). +func (b *Buffer) Len() int { return len(b.buf) - b.off } + +// Truncate discards all but the first n unread bytes from the buffer. +// It panics if n is negative or greater than the length of the buffer. +func (b *Buffer) Truncate(n int) { + switch { + case n < 0 || n > b.Len(): + panic("leveldb/util.Buffer: truncation out of range") + case n == 0: + // Reuse buffer space. + b.off = 0 + } + b.buf = b.buf[0 : b.off+n] +} + +// Reset resets the buffer so it has no content. +// b.Reset() is the same as b.Truncate(0). +func (b *Buffer) Reset() { b.Truncate(0) } + +// grow grows the buffer to guarantee space for n more bytes. +// It returns the index where bytes should be written. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) grow(n int) int { + m := b.Len() + // If buffer is empty, reset to recover space. + if m == 0 && b.off != 0 { + b.Truncate(0) + } + if len(b.buf)+n > cap(b.buf) { + var buf []byte + if b.buf == nil && n <= len(b.bootstrap) { + buf = b.bootstrap[0:] + } else if m+n <= cap(b.buf)/2 { + // We can slide things down instead of allocating a new + // slice. We only need m+n <= cap(b.buf) to slide, but + // we instead let capacity get twice as large so we + // don't spend all our time copying. + copy(b.buf[:], b.buf[b.off:]) + buf = b.buf[:m] + } else { + // not enough space anywhere + buf = makeSlice(2*cap(b.buf) + n) + copy(buf, b.buf[b.off:]) + } + b.buf = buf + b.off = 0 + } + b.buf = b.buf[0 : b.off+m+n] + return b.off + m +} + +// Alloc allocs n bytes of slice from the buffer, growing the buffer as +// needed. If n is negative, Alloc will panic. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) Alloc(n int) []byte { + if n < 0 { + panic("leveldb/util.Buffer.Alloc: negative count") + } + m := b.grow(n) + return b.buf[m:] +} + +// Grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After Grow(n), at least n bytes can be written to the +// buffer without another allocation. +// If n is negative, Grow will panic. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) Grow(n int) { + if n < 0 { + panic("leveldb/util.Buffer.Grow: negative count") + } + m := b.grow(n) + b.buf = b.buf[0:m] +} + +// Write appends the contents of p to the buffer, growing the buffer as +// needed. The return value n is the length of p; err is always nil. If the +// buffer becomes too large, Write will panic with bytes.ErrTooLarge. +func (b *Buffer) Write(p []byte) (n int, err error) { + m := b.grow(len(p)) + return copy(b.buf[m:], p), nil +} + +// MinRead is the minimum slice size passed to a Read call by +// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond +// what is required to hold the contents of r, ReadFrom will not grow the +// underlying buffer. +const MinRead = 512 + +// ReadFrom reads data from r until EOF and appends it to the buffer, growing +// the buffer as needed. The return value n is the number of bytes read. Any +// error except io.EOF encountered during the read is also returned. If the +// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge. +func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { + // If buffer is empty, reset to recover space. 
+ if b.off >= len(b.buf) { + b.Truncate(0) + } + for { + if free := cap(b.buf) - len(b.buf); free < MinRead { + // not enough space at end + newBuf := b.buf + if b.off+free < MinRead { + // not enough space using beginning of buffer; + // double buffer capacity + newBuf = makeSlice(2*cap(b.buf) + MinRead) + } + copy(newBuf, b.buf[b.off:]) + b.buf = newBuf[:len(b.buf)-b.off] + b.off = 0 + } + m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) + b.buf = b.buf[0 : len(b.buf)+m] + n += int64(m) + if e == io.EOF { + break + } + if e != nil { + return n, e + } + } + return n, nil // err is EOF, so return nil explicitly +} + +// makeSlice allocates a slice of size n. If the allocation fails, it panics +// with bytes.ErrTooLarge. +func makeSlice(n int) []byte { + // If the make fails, give a known error. + defer func() { + if recover() != nil { + panic(bytes.ErrTooLarge) + } + }() + return make([]byte, n) +} + +// WriteTo writes data to w until the buffer is drained or an error occurs. +// The return value n is the number of bytes written; it always fits into an +// int, but it is int64 to match the io.WriterTo interface. Any error +// encountered during the write is also returned. +func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { + if b.off < len(b.buf) { + nBytes := b.Len() + m, e := w.Write(b.buf[b.off:]) + if m > nBytes { + panic("leveldb/util.Buffer.WriteTo: invalid Write count") + } + b.off += m + n = int64(m) + if e != nil { + return n, e + } + // all bytes should have been written, by definition of + // Write method in io.Writer + if m != nBytes { + return n, io.ErrShortWrite + } + } + // Buffer is now empty; reset. + b.Truncate(0) + return +} + +// WriteByte appends the byte c to the buffer, growing the buffer as needed. +// The returned error is always nil, but is included to match bufio.Writer's +// WriteByte. If the buffer becomes too large, WriteByte will panic with +// bytes.ErrTooLarge. +func (b *Buffer) WriteByte(c byte) error { + m := b.grow(1) + b.buf[m] = c + return nil +} + +// Read reads the next len(p) bytes from the buffer or until the buffer +// is drained. The return value n is the number of bytes read. If the +// buffer has no data to return, err is io.EOF (unless len(p) is zero); +// otherwise it is nil. +func (b *Buffer) Read(p []byte) (n int, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + if len(p) == 0 { + return + } + return 0, io.EOF + } + n = copy(p, b.buf[b.off:]) + b.off += n + return +} + +// Next returns a slice containing the next n bytes from the buffer, +// advancing the buffer as if the bytes had been returned by Read. +// If there are fewer than n bytes in the buffer, Next returns the entire buffer. +// The slice is only valid until the next call to a read or write method. +func (b *Buffer) Next(n int) []byte { + m := b.Len() + if n > m { + n = m + } + data := b.buf[b.off : b.off+n] + b.off += n + return data +} + +// ReadByte reads and returns the next byte from the buffer. +// If no byte is available, it returns error io.EOF. +func (b *Buffer) ReadByte() (c byte, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + return 0, io.EOF + } + c = b.buf[b.off] + b.off++ + return c, nil +} + +// ReadBytes reads until the first occurrence of delim in the input, +// returning a slice containing the data up to and including the delimiter. 
+// If ReadBytes encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. +func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { + slice, err := b.readSlice(delim) + // return a copy of slice. The buffer's backing array may + // be overwritten by later calls. + line = append(line, slice...) + return +} + +// readSlice is like ReadBytes but returns a reference to internal buffer data. +func (b *Buffer) readSlice(delim byte) (line []byte, err error) { + i := bytes.IndexByte(b.buf[b.off:], delim) + end := b.off + i + 1 + if i < 0 { + end = len(b.buf) + err = io.EOF + } + line = b.buf[b.off:end] + b.off = end + return line, err +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial +// contents. It is intended to prepare a Buffer to read existing data. It +// can also be used to size the internal buffer for writing. To do that, +// buf should have the desired capacity but a length of zero. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go new file mode 100644 index 00000000000..2f3db974a79 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go @@ -0,0 +1,239 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +import ( + "fmt" + "sync" + "sync/atomic" + "time" +) + +type buffer struct { + b []byte + miss int +} + +// BufferPool is a 'buffer pool'. +type BufferPool struct { + pool [6]chan []byte + size [5]uint32 + sizeMiss [5]uint32 + sizeHalf [5]uint32 + baseline [4]int + baseline0 int + + mu sync.RWMutex + closed bool + closeC chan struct{} + + get uint32 + put uint32 + half uint32 + less uint32 + equal uint32 + greater uint32 + miss uint32 +} + +func (p *BufferPool) poolNum(n int) int { + if n <= p.baseline0 && n > p.baseline0/2 { + return 0 + } + for i, x := range p.baseline { + if n <= x { + return i + 1 + } + } + return len(p.baseline) + 1 +} + +// Get returns buffer with length of n. +func (p *BufferPool) Get(n int) []byte { + if p == nil { + return make([]byte, n) + } + + p.mu.RLock() + defer p.mu.RUnlock() + + if p.closed { + return make([]byte, n) + } + + atomic.AddUint32(&p.get, 1) + + poolNum := p.poolNum(n) + pool := p.pool[poolNum] + if poolNum == 0 { + // Fast path. 
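		// Editorial aside (not upstream commentary): poolNum buckets requests by
		// size. With baseline0 = 64 KiB the baselines are {16, 32, 128, 256} KiB, so
		// for example a 48 KiB request lands in pool 0 (this fast path), an 8 KiB
		// request in pool 1, a 100 KiB request in pool 3, and anything above
		// 256 KiB in pool 5.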
+ select { + case b := <-pool: + switch { + case cap(b) > n: + if cap(b)-n >= n { + atomic.AddUint32(&p.half, 1) + select { + case pool <- b: + default: + } + return make([]byte, n) + } else { + atomic.AddUint32(&p.less, 1) + return b[:n] + } + case cap(b) == n: + atomic.AddUint32(&p.equal, 1) + return b[:n] + default: + atomic.AddUint32(&p.greater, 1) + } + default: + atomic.AddUint32(&p.miss, 1) + } + + return make([]byte, n, p.baseline0) + } else { + sizePtr := &p.size[poolNum-1] + + select { + case b := <-pool: + switch { + case cap(b) > n: + if cap(b)-n >= n { + atomic.AddUint32(&p.half, 1) + sizeHalfPtr := &p.sizeHalf[poolNum-1] + if atomic.AddUint32(sizeHalfPtr, 1) == 20 { + atomic.StoreUint32(sizePtr, uint32(cap(b)/2)) + atomic.StoreUint32(sizeHalfPtr, 0) + } else { + select { + case pool <- b: + default: + } + } + return make([]byte, n) + } else { + atomic.AddUint32(&p.less, 1) + return b[:n] + } + case cap(b) == n: + atomic.AddUint32(&p.equal, 1) + return b[:n] + default: + atomic.AddUint32(&p.greater, 1) + if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) { + select { + case pool <- b: + default: + } + } + } + default: + atomic.AddUint32(&p.miss, 1) + } + + if size := atomic.LoadUint32(sizePtr); uint32(n) > size { + if size == 0 { + atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n)) + } else { + sizeMissPtr := &p.sizeMiss[poolNum-1] + if atomic.AddUint32(sizeMissPtr, 1) == 20 { + atomic.StoreUint32(sizePtr, uint32(n)) + atomic.StoreUint32(sizeMissPtr, 0) + } + } + return make([]byte, n) + } else { + return make([]byte, n, size) + } + } +} + +// Put adds given buffer to the pool. +func (p *BufferPool) Put(b []byte) { + if p == nil { + return + } + + p.mu.RLock() + defer p.mu.RUnlock() + + if p.closed { + return + } + + atomic.AddUint32(&p.put, 1) + + pool := p.pool[p.poolNum(cap(b))] + select { + case pool <- b: + default: + } + +} + +func (p *BufferPool) Close() { + if p == nil { + return + } + + p.mu.Lock() + if !p.closed { + p.closed = true + p.closeC <- struct{}{} + } + p.mu.Unlock() +} + +func (p *BufferPool) String() string { + if p == nil { + return "" + } + + return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}", + p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss) +} + +func (p *BufferPool) drain() { + ticker := time.NewTicker(2 * time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + for _, ch := range p.pool { + select { + case <-ch: + default: + } + } + case <-p.closeC: + close(p.closeC) + for _, ch := range p.pool { + close(ch) + } + return + } + } +} + +// NewBufferPool creates a new initialized 'buffer pool'. +func NewBufferPool(baseline int) *BufferPool { + if baseline <= 0 { + panic("baseline can't be <= 0") + } + p := &BufferPool{ + baseline0: baseline, + baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4}, + closeC: make(chan struct{}, 1), + } + for i, cap := range []int{2, 2, 4, 4, 2, 1} { + p.pool[i] = make(chan []byte, cap) + } + go p.drain() + return p +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go new file mode 100644 index 00000000000..631c9d6109c --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go @@ -0,0 +1,30 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
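// Editorial aside (not part of the vendored file): the CRC helpers below implement
// LevelDB's masked CRC-32C. Value() rotates the raw Castagnoli checksum right by
// 15 bits and adds the constant 0xa282ead8; table.Reader's readRawBlock compares
// this masked value against the 4-byte trailer checksum, which covers the block
// payload plus the 1-byte compression type, and table.Writer's writeBlock produces
// it the same way.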
+ +package util + +import ( + "hash/crc32" +) + +var table = crc32.MakeTable(crc32.Castagnoli) + +// CRC is a CRC-32 checksum computed using Castagnoli's polynomial. +type CRC uint32 + +// NewCRC creates a new crc based on the given bytes. +func NewCRC(b []byte) CRC { + return CRC(0).Update(b) +} + +// Update updates the crc with the given bytes. +func (c CRC) Update(b []byte) CRC { + return CRC(crc32.Update(uint32(c), table, b)) +} + +// Value returns a masked crc. +func (c CRC) Value() uint32 { + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go new file mode 100644 index 00000000000..7f3fa4e2c79 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go @@ -0,0 +1,48 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +import ( + "encoding/binary" +) + +// Hash return hash of the given data. +func Hash(data []byte, seed uint32) uint32 { + // Similar to murmur hash + const ( + m = uint32(0xc6a4a793) + r = uint32(24) + ) + var ( + h = seed ^ (uint32(len(data)) * m) + i int + ) + + for n := len(data) - len(data)%4; i < n; i += 4 { + h += binary.LittleEndian.Uint32(data[i:]) + h *= m + h ^= (h >> 16) + } + + switch len(data) - i { + default: + panic("not reached") + case 3: + h += uint32(data[i+2]) << 16 + fallthrough + case 2: + h += uint32(data[i+1]) << 8 + fallthrough + case 1: + h += uint32(data[i]) + h *= m + h ^= (h >> r) + case 0: + } + + return h +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go new file mode 100644 index 00000000000..85159583d2c --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go @@ -0,0 +1,32 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +// Range is a key range. +type Range struct { + // Start of the key range, include in the range. + Start []byte + + // Limit of the key range, not include in the range. + Limit []byte +} + +// BytesPrefix returns key range that satisfy the given prefix. +// This only applicable for the standard 'bytes comparer'. +func BytesPrefix(prefix []byte) *Range { + var limit []byte + for i := len(prefix) - 1; i >= 0; i-- { + c := prefix[i] + if c < 0xff { + limit = make([]byte, i+1) + copy(limit, prefix) + limit[i] = c + 1 + break + } + } + return &Range{prefix, limit} +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go new file mode 100644 index 00000000000..80614afc58f --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go @@ -0,0 +1,73 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package util provides utilities used throughout leveldb. +package util + +import ( + "errors" +) + +var ( + ErrReleased = errors.New("leveldb: resource already relesed") + ErrHasReleaser = errors.New("leveldb: releaser already defined") +) + +// Releaser is the interface that wraps the basic Release method. +type Releaser interface { + // Release releases associated resources. 
Release should always success + // and can be called multiple times without causing error. + Release() +} + +// ReleaseSetter is the interface that wraps the basic SetReleaser method. +type ReleaseSetter interface { + // SetReleaser associates the given releaser to the resources. The + // releaser will be called once coresponding resources released. + // Calling SetReleaser with nil will clear the releaser. + // + // This will panic if a releaser already present or coresponding + // resource is already released. Releaser should be cleared first + // before assigned a new one. + SetReleaser(releaser Releaser) +} + +// BasicReleaser provides basic implementation of Releaser and ReleaseSetter. +type BasicReleaser struct { + releaser Releaser + released bool +} + +// Released returns whether Release method already called. +func (r *BasicReleaser) Released() bool { + return r.released +} + +// Release implements Releaser.Release. +func (r *BasicReleaser) Release() { + if !r.released { + if r.releaser != nil { + r.releaser.Release() + r.releaser = nil + } + r.released = true + } +} + +// SetReleaser implements ReleaseSetter.SetReleaser. +func (r *BasicReleaser) SetReleaser(releaser Releaser) { + if r.released { + panic(ErrReleased) + } + if r.releaser != nil && releaser != nil { + panic(ErrHasReleaser) + } + r.releaser = releaser +} + +type NoopReleaser struct{} + +func (NoopReleaser) Release() {} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/version.go b/vendor/github.com/syndtr/goleveldb/leveldb/version.go new file mode 100644 index 00000000000..73f272af5ff --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/version.go @@ -0,0 +1,528 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "sync/atomic" + "unsafe" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +type tSet struct { + level int + table *tFile +} + +type version struct { + s *session + + levels []tFiles + + // Level that should be compacted next and its compaction score. + // Score < 1 means compaction is not strictly needed. These fields + // are initialized by computeCompaction() + cLevel int + cScore float64 + + cSeek unsafe.Pointer + + closing bool + ref int + released bool +} + +func newVersion(s *session) *version { + return &version{s: s} +} + +func (v *version) incref() { + if v.released { + panic("already released") + } + + v.ref++ + if v.ref == 1 { + // Incr file ref. + for _, tt := range v.levels { + for _, t := range tt { + v.s.addFileRef(t.fd, 1) + } + } + } +} + +func (v *version) releaseNB() { + v.ref-- + if v.ref > 0 { + return + } else if v.ref < 0 { + panic("negative version ref") + } + + for _, tt := range v.levels { + for _, t := range tt { + if v.s.addFileRef(t.fd, -1) == 0 { + v.s.tops.remove(t) + } + } + } + + v.released = true +} + +func (v *version) release() { + v.s.vmu.Lock() + v.releaseNB() + v.s.vmu.Unlock() +} + +func (v *version) walkOverlapping(aux tFiles, ikey internalKey, f func(level int, t *tFile) bool, lf func(level int) bool) { + ukey := ikey.ukey() + + // Aux level. + if aux != nil { + for _, t := range aux { + if t.overlaps(v.s.icmp, ukey, ukey) { + if !f(-1, t) { + return + } + } + } + + if lf != nil && !lf(-1) { + return + } + } + + // Walk tables level-by-level. 
+ for level, tables := range v.levels { + if len(tables) == 0 { + continue + } + + if level == 0 { + // Level-0 files may overlap each other. Find all files that + // overlap ukey. + for _, t := range tables { + if t.overlaps(v.s.icmp, ukey, ukey) { + if !f(level, t) { + return + } + } + } + } else { + if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) { + t := tables[i] + if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { + if !f(level, t) { + return + } + } + } + } + + if lf != nil && !lf(level) { + return + } + } +} + +func (v *version) get(aux tFiles, ikey internalKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) { + if v.closing { + return nil, false, ErrClosed + } + + ukey := ikey.ukey() + + var ( + tset *tSet + tseek bool + + // Level-0. + zfound bool + zseq uint64 + zkt keyType + zval []byte + ) + + err = ErrNotFound + + // Since entries never hop across level, finding key/value + // in smaller level make later levels irrelevant. + v.walkOverlapping(aux, ikey, func(level int, t *tFile) bool { + if level >= 0 && !tseek { + if tset == nil { + tset = &tSet{level, t} + } else { + tseek = true + } + } + + var ( + fikey, fval []byte + ferr error + ) + if noValue { + fikey, ferr = v.s.tops.findKey(t, ikey, ro) + } else { + fikey, fval, ferr = v.s.tops.find(t, ikey, ro) + } + + switch ferr { + case nil: + case ErrNotFound: + return true + default: + err = ferr + return false + } + + if fukey, fseq, fkt, fkerr := parseInternalKey(fikey); fkerr == nil { + if v.s.icmp.uCompare(ukey, fukey) == 0 { + // Level <= 0 may overlaps each-other. + if level <= 0 { + if fseq >= zseq { + zfound = true + zseq = fseq + zkt = fkt + zval = fval + } + } else { + switch fkt { + case keyTypeVal: + value = fval + err = nil + case keyTypeDel: + default: + panic("leveldb: invalid internalKey type") + } + return false + } + } + } else { + err = fkerr + return false + } + + return true + }, func(level int) bool { + if zfound { + switch zkt { + case keyTypeVal: + value = zval + err = nil + case keyTypeDel: + default: + panic("leveldb: invalid internalKey type") + } + return false + } + + return true + }) + + if tseek && tset.table.consumeSeek() <= 0 { + tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) + } + + return +} + +func (v *version) sampleSeek(ikey internalKey) (tcomp bool) { + var tset *tSet + + v.walkOverlapping(nil, ikey, func(level int, t *tFile) bool { + if tset == nil { + tset = &tSet{level, t} + return true + } + if tset.table.consumeSeek() <= 0 { + tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) + } + return false + }, nil) + + return +} + +func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { + strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader) + for level, tables := range v.levels { + if level == 0 { + // Merge all level zero files together since they may overlap. + for _, t := range tables { + its = append(its, v.s.tops.newIterator(t, slice, ro)) + } + } else if len(tables) != 0 { + its = append(its, iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict)) + } + } + return +} + +func (v *version) newStaging() *versionStaging { + return &versionStaging{base: v} +} + +// Spawn a new version based on this version. 
+func (v *version) spawn(r *sessionRecord) *version { + staging := v.newStaging() + staging.commit(r) + return staging.finish() +} + +func (v *version) fillRecord(r *sessionRecord) { + for level, tables := range v.levels { + for _, t := range tables { + r.addTableFile(level, t) + } + } +} + +func (v *version) tLen(level int) int { + if level < len(v.levels) { + return len(v.levels[level]) + } + return 0 +} + +func (v *version) offsetOf(ikey internalKey) (n int64, err error) { + for level, tables := range v.levels { + for _, t := range tables { + if v.s.icmp.Compare(t.imax, ikey) <= 0 { + // Entire file is before "ikey", so just add the file size + n += t.size + } else if v.s.icmp.Compare(t.imin, ikey) > 0 { + // Entire file is after "ikey", so ignore + if level > 0 { + // Files other than level 0 are sorted by meta->min, so + // no further files in this level will contain data for + // "ikey". + break + } + } else { + // "ikey" falls in the range for this table. Add the + // approximate offset of "ikey" within the table. + if m, err := v.s.tops.offsetOf(t, ikey); err == nil { + n += m + } else { + return 0, err + } + } + } + } + + return +} + +func (v *version) pickMemdbLevel(umin, umax []byte, maxLevel int) (level int) { + if maxLevel > 0 { + if len(v.levels) == 0 { + return maxLevel + } + if !v.levels[0].overlaps(v.s.icmp, umin, umax, true) { + var overlaps tFiles + for ; level < maxLevel; level++ { + if pLevel := level + 1; pLevel >= len(v.levels) { + return maxLevel + } else if v.levels[pLevel].overlaps(v.s.icmp, umin, umax, false) { + break + } + if gpLevel := level + 2; gpLevel < len(v.levels) { + overlaps = v.levels[gpLevel].getOverlaps(overlaps, v.s.icmp, umin, umax, false) + if overlaps.size() > int64(v.s.o.GetCompactionGPOverlaps(level)) { + break + } + } + } + } + } + return +} + +func (v *version) computeCompaction() { + // Precomputed best level for next compaction + bestLevel := int(-1) + bestScore := float64(-1) + + statFiles := make([]int, len(v.levels)) + statSizes := make([]string, len(v.levels)) + statScore := make([]string, len(v.levels)) + statTotSize := int64(0) + + for level, tables := range v.levels { + var score float64 + size := tables.size() + if level == 0 { + // We treat level-0 specially by bounding the number of files + // instead of number of bytes for two reasons: + // + // (1) With larger write-buffer sizes, it is nice not to do too + // many level-0 compaction. + // + // (2) The files in level-0 are merged on every read and + // therefore we wish to avoid too many files when the individual + // file size is small (perhaps because of a small write-buffer + // setting, or very high compression ratios, or lots of + // overwrites/deletions). 
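			// Editorial worked example (not upstream commentary): with a level-0
			// trigger of 4 files, five level-0 tables score 5/4 = 1.25, while a
			// deeper level holding 90 MiB against a 100 MiB budget scores 0.9; the
			// level with the highest score becomes cLevel, and needCompaction()
			// fires once cScore >= 1 (or when a seek-triggered compaction is
			// pending).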
+ score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger()) + } else { + score = float64(size) / float64(v.s.o.GetCompactionTotalSize(level)) + } + + if score > bestScore { + bestLevel = level + bestScore = score + } + + statFiles[level] = len(tables) + statSizes[level] = shortenb(int(size)) + statScore[level] = fmt.Sprintf("%.2f", score) + statTotSize += size + } + + v.cLevel = bestLevel + v.cScore = bestScore + + v.s.logf("version@stat F·%v S·%s%v Sc·%v", statFiles, shortenb(int(statTotSize)), statSizes, statScore) +} + +func (v *version) needCompaction() bool { + return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil +} + +type tablesScratch struct { + added map[int64]atRecord + deleted map[int64]struct{} +} + +type versionStaging struct { + base *version + levels []tablesScratch +} + +func (p *versionStaging) getScratch(level int) *tablesScratch { + if level >= len(p.levels) { + newLevels := make([]tablesScratch, level+1) + copy(newLevels, p.levels) + p.levels = newLevels + } + return &(p.levels[level]) +} + +func (p *versionStaging) commit(r *sessionRecord) { + // Deleted tables. + for _, r := range r.deletedTables { + scratch := p.getScratch(r.level) + if r.level < len(p.base.levels) && len(p.base.levels[r.level]) > 0 { + if scratch.deleted == nil { + scratch.deleted = make(map[int64]struct{}) + } + scratch.deleted[r.num] = struct{}{} + } + if scratch.added != nil { + delete(scratch.added, r.num) + } + } + + // New tables. + for _, r := range r.addedTables { + scratch := p.getScratch(r.level) + if scratch.added == nil { + scratch.added = make(map[int64]atRecord) + } + scratch.added[r.num] = r + if scratch.deleted != nil { + delete(scratch.deleted, r.num) + } + } +} + +func (p *versionStaging) finish() *version { + // Build new version. + nv := newVersion(p.base.s) + numLevel := len(p.levels) + if len(p.base.levels) > numLevel { + numLevel = len(p.base.levels) + } + nv.levels = make([]tFiles, numLevel) + for level := 0; level < numLevel; level++ { + var baseTabels tFiles + if level < len(p.base.levels) { + baseTabels = p.base.levels[level] + } + + if level < len(p.levels) { + scratch := p.levels[level] + + var nt tFiles + // Prealloc list if possible. + if n := len(baseTabels) + len(scratch.added) - len(scratch.deleted); n > 0 { + nt = make(tFiles, 0, n) + } + + // Base tables. + for _, t := range baseTabels { + if _, ok := scratch.deleted[t.fd.Num]; ok { + continue + } + if _, ok := scratch.added[t.fd.Num]; ok { + continue + } + nt = append(nt, t) + } + + // New tables. + for _, r := range scratch.added { + nt = append(nt, tableFileFromRecord(r)) + } + + if len(nt) != 0 { + // Sort tables. + if level == 0 { + nt.sortByNum() + } else { + nt.sortByKey(p.base.s.icmp) + } + + nv.levels[level] = nt + } + } else { + nv.levels[level] = baseTabels + } + } + + // Trim levels. + n := len(nv.levels) + for ; n > 0 && nv.levels[n-1] == nil; n-- { + } + nv.levels = nv.levels[:n] + + // Compute compaction score for new version. + nv.computeCompaction() + + return nv +} + +type versionReleaser struct { + v *version + once bool +} + +func (vr *versionReleaser) Release() { + v := vr.v + v.s.vmu.Lock() + if !vr.once { + v.releaseNB() + vr.once = true + } + v.s.vmu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 69ef3c0b595..5de1b031ec2 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -722,6 +722,6 @@ const ( ) // Version is the current grpc version. 
-const Version = "1.12.0" +const Version = "1.12.2" const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go index 8b93e222e9e..ab356189050 100644 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -682,12 +682,25 @@ func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { }) } +func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { + for k, vv := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields +} + // WriteHeader sends the header metedata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.headerOk || s.getState() == streamDone { + if s.updateHeaderSent() || s.getState() == streamDone { return ErrIllegalHeaderWrite } - s.headerOk = true + s.hdrMu.Lock() if md.Len() > 0 { if s.header.Len() > 0 { s.header = metadata.Join(s.header, md) @@ -695,7 +708,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { s.header = md } } - md = s.header + t.writeHeaderLocked(s) + s.hdrMu.Unlock() + return nil +} + +func (t *http2Server) writeHeaderLocked(s *Stream) { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -704,15 +722,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { if s.sendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } - for k, vv := range md { - if isReservedHeader(k) { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - } + headerFields = appendHeaderFieldsFromMD(headerFields, s.header) t.controlBuf.put(&headerFrame{ streamID: s.id, hf: headerFields, @@ -728,7 +738,6 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { outHeader := &stats.OutHeader{} t.stats.HandleRPC(s.Context(), outHeader) } - return nil } // WriteStatus sends stream status to the client and terminates the stream. @@ -736,21 +745,20 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { - if !s.headerOk && s.header.Len() > 0 { - if err := t.WriteHeader(s, nil); err != nil { - return err - } - } else { - if s.getState() == streamDone { - return nil - } + if s.getState() == streamDone { + return nil } + s.hdrMu.Lock() // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. 
- if !s.headerOk { - headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + if !s.updateHeaderSent() { // No headers have been sent. + if len(s.header) > 0 { // Send a separate header frame. + t.writeHeaderLocked(s) + } else { // Send a trailer only response. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + } } headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) @@ -766,16 +774,8 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { } // Attach the trailer metadata. - for k, vv := range s.trailer { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - } - trailer := &headerFrame{ + headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer) + trailingHeader := &headerFrame{ streamID: s.id, hf: headerFields, endStream: true, @@ -783,7 +783,8 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { atomic.StoreUint32(&t.resetPingStrikes, 1) }, } - t.closeStream(s, false, 0, trailer, true) + s.hdrMu.Unlock() + t.closeStream(s, false, 0, trailingHeader, true) if t.stats != nil { t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) } @@ -793,7 +794,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { - if !s.headerOk { // Headers haven't been written yet. + if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { // TODO(mmukhi, dfawley): Make sure this is the right code to return. return streamErrorf(codes.Internal, "transport: %v", err) diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go index a355866089c..835c8126946 100644 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -531,10 +531,14 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { if w.err != nil { return 0, w.err } - n = copy(w.buf[w.offset:], b) - w.offset += n - if w.offset >= w.batchSize { - err = w.Flush() + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.Flush() + } } return n, err } diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index 2f643a3d042..f51f878884d 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -185,13 +185,20 @@ type Stream struct { headerChan chan struct{} // closed to indicate the end of header metadata. headerDone uint32 // set when headerChan is closed. 
Used to avoid closing headerChan multiple times. - header metadata.MD // the received header metadata. - trailer metadata.MD // the key-value map of trailer metadata. - headerOk bool // becomes true from the first header is about to send - state streamState + // hdrMu protects header and trailer metadata on the server-side. + hdrMu sync.Mutex + header metadata.MD // the received header metadata. + trailer metadata.MD // the key-value map of trailer metadata. - status *status.Status // the status error received from the server + // On the server-side, headerSent is atomically set to 1 when the headers are sent out. + headerSent uint32 + + state streamState + + // On client-side it is the status error received from the server. + // On server-side it is unused. + status *status.Status bytesReceived uint32 // indicates whether any bytes have been received on this stream unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream @@ -201,6 +208,17 @@ type Stream struct { contentSubtype string } +// isHeaderSent is only valid on the server-side. +func (s *Stream) isHeaderSent() bool { + return atomic.LoadUint32(&s.headerSent) == 1 +} + +// updateHeaderSent updates headerSent and returns true +// if it was alreay set. It is valid only on server-side. +func (s *Stream) updateHeaderSent() bool { + return atomic.SwapUint32(&s.headerSent, 1) == 1 +} + func (s *Stream) swapState(st streamState) streamState { return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) } @@ -313,10 +331,12 @@ func (s *Stream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } - if s.headerOk || atomic.LoadUint32((*uint32)(&s.state)) == uint32(streamDone) { + if s.isHeaderSent() || s.getState() == streamDone { return ErrIllegalHeaderWrite } + s.hdrMu.Lock() s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() return nil } @@ -335,7 +355,12 @@ func (s *Stream) SetTrailer(md metadata.MD) error { if md.Len() == 0 { return nil } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() return nil }
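
For context on the bufWriter change in the http_util.go hunk above: in grpc-go 1.12.0, Write copied into the batch buffer at most once, so an input larger than the remaining buffer space came back as a short write with a nil error; the 1.12.2 code loops, flushing each time the buffer fills, until the whole slice has been consumed. The sketch below mirrors that chunk-and-flush pattern with a hypothetical chunkWriter writing into a bytes.Buffer; it is not the vendored type, just a minimal illustration of the loop.

```go
package main

import (
	"bytes"
	"fmt"
)

// chunkWriter is a hypothetical stand-in for the vendored bufWriter: it
// accumulates writes in a fixed-size batch buffer and flushes to an
// underlying writer whenever the buffer fills up.
type chunkWriter struct {
	buf       []byte
	offset    int
	batchSize int
	out       *bytes.Buffer
	err       error
}

func (w *chunkWriter) Write(b []byte) (n int, err error) {
	if w.err != nil {
		return 0, w.err
	}
	// Same shape as the grpc-go 1.12.2 fix: keep copying and flushing until
	// the whole input has been consumed, instead of copying once and
	// possibly returning a short write.
	for len(b) > 0 {
		nn := copy(w.buf[w.offset:], b)
		b = b[nn:]
		w.offset += nn
		n += nn
		if w.offset >= w.batchSize {
			err = w.flush()
		}
	}
	return n, err
}

func (w *chunkWriter) flush() error {
	if w.err != nil {
		return w.err
	}
	_, w.err = w.out.Write(w.buf[:w.offset])
	w.offset = 0
	return w.err
}

func main() {
	w := &chunkWriter{buf: make([]byte, 4), batchSize: 4, out: &bytes.Buffer{}}
	// A 10-byte payload is larger than the 4-byte batch buffer; the loop
	// flushes twice and leaves the remaining 2 bytes buffered.
	n, err := w.Write([]byte("0123456789"))
	fmt.Println(n, err, w.out.String(), w.offset) // 10 <nil> 01234567 2
}
```

Looping until the slice is drained keeps Write consistent with io.Writer's contract that a nil error implies the full input was accepted, which appears to be the motivation for the upstream change.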