From 9835cb1b2a36d07b81c80b430c80c0887bb627b1 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Sun, 21 Jun 2020 01:41:40 -0400 Subject: [PATCH 01/37] WIP [dbnode] Less allocations for block rotations --- .../services/m3dbnode/config/config_mock.go | 20 -- src/dbnode/digest/digest_mock.go | 29 +-- src/dbnode/encoding/encoding_mock.go | 104 ++++---- src/dbnode/generated-source-files.mk | 2 +- src/dbnode/persist/fs/commitlog/reader.go | 30 +-- src/dbnode/persist/fs/commitlog/writer.go | 29 +-- src/dbnode/persist/fs/fs_mock.go | 36 +-- src/dbnode/persist/fs/merger.go | 84 +++---- src/dbnode/persist/fs/persist_manager.go | 6 +- src/dbnode/persist/fs/types.go | 7 +- src/dbnode/persist/fs/write.go | 71 +++--- src/dbnode/persist/persist_mock.go | 29 +-- src/dbnode/persist/types.go | 124 +++++++++- .../storage/bootstrap/bootstrap_mock.go | 40 +-- .../bootstrapper/commitlog/source.go | 15 -- .../bootstrap/bootstrapper/fs/source.go | 2 +- .../bootstrap/bootstrapper/peers/source.go | 8 +- src/dbnode/storage/database_test.go | 3 - .../storage/dirty_series_new_map_gen.go | 44 +--- src/dbnode/storage/fs_merge_with_mem.go | 33 +-- src/dbnode/storage/fs_merge_with_mem_test.go | 34 ++- src/dbnode/storage/id_list_gen.go | 16 +- src/dbnode/storage/index.go | 2 +- src/dbnode/storage/index/convert/convert.go | 73 ++---- .../storage/index/convert/convert_test.go | 22 -- .../storage/index_query_concurrent_test.go | 2 +- src/dbnode/storage/namespace.go | 2 +- src/dbnode/storage/series/buffer.go | 29 +-- src/dbnode/storage/series/buffer_mock.go | 78 +++--- src/dbnode/storage/series/buffer_test.go | 101 ++++---- .../storage/series/lookup/lookup_mock.go | 20 -- src/dbnode/storage/series/series.go | 32 +-- src/dbnode/storage/series/series_mock.go | 49 ++-- src/dbnode/storage/series/series_test.go | 11 +- src/dbnode/storage/series/types.go | 7 +- src/dbnode/storage/shard.go | 124 ++++------ .../shard_fetch_blocks_metadata_test.go | 5 +- src/dbnode/storage/shard_test.go | 33 +-- src/dbnode/storage/storage_mock.go | 230 ++++++++---------- src/dbnode/storage/types.go | 5 - src/dbnode/ts/types.go | 3 - src/dbnode/x/xio/io_mock.go | 20 -- src/m3ninx/doc/document.go | 10 +- src/x/ident/identifier_pool.go | 2 +- src/x/ident/tag_iterator.go | 88 +++++-- src/x/ident/types.go | 4 + 46 files changed, 708 insertions(+), 1010 deletions(-) diff --git a/src/cmd/services/m3dbnode/config/config_mock.go b/src/cmd/services/m3dbnode/config/config_mock.go index a511f9fd67..43c0512d6e 100644 --- a/src/cmd/services/m3dbnode/config/config_mock.go +++ b/src/cmd/services/m3dbnode/config/config_mock.go @@ -1,26 +1,6 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/m3db/m3/src/cmd/services/m3dbnode/config (interfaces: BootstrapConfigurationValidator) -// Copyright (c) 2019 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Package config is a generated GoMock package. package config diff --git a/src/dbnode/digest/digest_mock.go b/src/dbnode/digest/digest_mock.go index 8892e8bb4d..31a44d7235 100644 --- a/src/dbnode/digest/digest_mock.go +++ b/src/dbnode/digest/digest_mock.go @@ -1,35 +1,14 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/m3db/m3/src/dbnode/digest (interfaces: ReaderWithDigest) -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Package digest is a generated GoMock package. package digest import ( - "hash" - "io" - "reflect" - - "github.com/golang/mock/gomock" + gomock "github.com/golang/mock/gomock" + hash "hash" + io "io" + reflect "reflect" ) // MockReaderWithDigest is a mock of ReaderWithDigest interface diff --git a/src/dbnode/encoding/encoding_mock.go b/src/dbnode/encoding/encoding_mock.go index fa61de0683..08ca8cefac 100644 --- a/src/dbnode/encoding/encoding_mock.go +++ b/src/dbnode/encoding/encoding_mock.go @@ -1,46 +1,24 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/m3db/m3/src/dbnode/encoding/types.go - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Source: /Users/r/go/src/github.com/m3db/m3/src/dbnode/encoding/types.go // Package encoding is a generated GoMock package. package encoding import ( - "io" - "reflect" - "time" - - "github.com/m3db/m3/src/dbnode/namespace" - "github.com/m3db/m3/src/dbnode/ts" - "github.com/m3db/m3/src/dbnode/x/xio" - "github.com/m3db/m3/src/dbnode/x/xpool" - "github.com/m3db/m3/src/x/checked" - "github.com/m3db/m3/src/x/context" - "github.com/m3db/m3/src/x/ident" - "github.com/m3db/m3/src/x/pool" - "github.com/m3db/m3/src/x/serialize" - time0 "github.com/m3db/m3/src/x/time" - - "github.com/golang/mock/gomock" + gomock "github.com/golang/mock/gomock" + namespace "github.com/m3db/m3/src/dbnode/namespace" + ts "github.com/m3db/m3/src/dbnode/ts" + xio "github.com/m3db/m3/src/dbnode/x/xio" + xpool "github.com/m3db/m3/src/dbnode/x/xpool" + checked "github.com/m3db/m3/src/x/checked" + context "github.com/m3db/m3/src/x/context" + ident "github.com/m3db/m3/src/x/ident" + pool "github.com/m3db/m3/src/x/pool" + serialize "github.com/m3db/m3/src/x/serialize" + time "github.com/m3db/m3/src/x/time" + io "io" + reflect "reflect" + time0 "time" ) // MockEncoder is a mock of Encoder interface @@ -79,7 +57,7 @@ func (mr *MockEncoderMockRecorder) SetSchema(descr interface{}) *gomock.Call { } // Encode mocks base method -func (m *MockEncoder) Encode(dp ts.Datapoint, unit time0.Unit, annotation ts.Annotation) error { +func (m *MockEncoder) Encode(dp ts.Datapoint, unit time.Unit, annotation ts.Annotation) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Encode", dp, unit, annotation) ret0, _ := ret[0].(error) @@ -166,7 +144,7 @@ func (mr *MockEncoderMockRecorder) Len() *gomock.Call { } // Reset mocks base method -func (m *MockEncoder) Reset(t time.Time, capacity int, schema namespace.SchemaDescr) { +func (m *MockEncoder) Reset(t time0.Time, capacity int, schema namespace.SchemaDescr) { m.ctrl.T.Helper() m.ctrl.Call(m, "Reset", t, capacity, schema) } @@ -204,7 +182,7 @@ func (mr *MockEncoderMockRecorder) Discard() *gomock.Call { } // DiscardReset mocks base method -func (m *MockEncoder) DiscardReset(t time.Time, capacity int, schema namespace.SchemaDescr) ts.Segment { +func (m *MockEncoder) DiscardReset(t time0.Time, capacity int, schema namespace.SchemaDescr) ts.Segment { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DiscardReset", t, capacity, schema) ret0, _ := ret[0].(ts.Segment) @@ -241,7 +219,7 @@ func (m *MockOptions) EXPECT() *MockOptionsMockRecorder { } // SetDefaultTimeUnit mocks base method -func (m *MockOptions) SetDefaultTimeUnit(tu time0.Unit) Options { +func (m *MockOptions) SetDefaultTimeUnit(tu time.Unit) Options { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetDefaultTimeUnit", tu) ret0, _ := ret[0].(Options) @@ -255,10 +233,10 @@ func (mr *MockOptionsMockRecorder) SetDefaultTimeUnit(tu interface{}) *gomock.Ca } // DefaultTimeUnit mocks base method -func (m *MockOptions) DefaultTimeUnit() time0.Unit { +func (m *MockOptions) DefaultTimeUnit() time.Unit { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DefaultTimeUnit") - ret0, _ := ret[0].(time0.Unit) + ret0, _ := ret[0].(time.Unit) return ret0 } @@ -269,7 +247,7 @@ func (mr *MockOptionsMockRecorder) DefaultTimeUnit() *gomock.Call { } // SetTimeEncodingSchemes mocks base method -func (m *MockOptions) 
SetTimeEncodingSchemes(value map[time0.Unit]TimeEncodingScheme) Options { +func (m *MockOptions) SetTimeEncodingSchemes(value map[time.Unit]TimeEncodingScheme) Options { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetTimeEncodingSchemes", value) ret0, _ := ret[0].(Options) @@ -586,11 +564,11 @@ func (mr *MockIteratorMockRecorder) Next() *gomock.Call { } // Current mocks base method -func (m *MockIterator) Current() (ts.Datapoint, time0.Unit, ts.Annotation) { +func (m *MockIterator) Current() (ts.Datapoint, time.Unit, ts.Annotation) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Current") ret0, _ := ret[0].(ts.Datapoint) - ret1, _ := ret[1].(time0.Unit) + ret1, _ := ret[1].(time.Unit) ret2, _ := ret[2].(ts.Annotation) return ret0, ret1, ret2 } @@ -665,11 +643,11 @@ func (mr *MockReaderIteratorMockRecorder) Next() *gomock.Call { } // Current mocks base method -func (m *MockReaderIterator) Current() (ts.Datapoint, time0.Unit, ts.Annotation) { +func (m *MockReaderIterator) Current() (ts.Datapoint, time.Unit, ts.Annotation) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Current") ret0, _ := ret[0].(ts.Datapoint) - ret1, _ := ret[1].(time0.Unit) + ret1, _ := ret[1].(time.Unit) ret2, _ := ret[2].(ts.Annotation) return ret0, ret1, ret2 } @@ -756,11 +734,11 @@ func (mr *MockMultiReaderIteratorMockRecorder) Next() *gomock.Call { } // Current mocks base method -func (m *MockMultiReaderIterator) Current() (ts.Datapoint, time0.Unit, ts.Annotation) { +func (m *MockMultiReaderIterator) Current() (ts.Datapoint, time.Unit, ts.Annotation) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Current") ret0, _ := ret[0].(ts.Datapoint) - ret1, _ := ret[1].(time0.Unit) + ret1, _ := ret[1].(time.Unit) ret2, _ := ret[2].(ts.Annotation) return ret0, ret1, ret2 } @@ -798,7 +776,7 @@ func (mr *MockMultiReaderIteratorMockRecorder) Close() *gomock.Call { } // Reset mocks base method -func (m *MockMultiReaderIterator) Reset(readers []xio.SegmentReader, start time.Time, blockSize time.Duration, schema namespace.SchemaDescr) { +func (m *MockMultiReaderIterator) Reset(readers []xio.SegmentReader, start time0.Time, blockSize time0.Duration, schema namespace.SchemaDescr) { m.ctrl.T.Helper() m.ctrl.Call(m, "Reset", readers, start, blockSize, schema) } @@ -887,11 +865,11 @@ func (mr *MockSeriesIteratorAccumulatorMockRecorder) Next() *gomock.Call { } // Current mocks base method -func (m *MockSeriesIteratorAccumulator) Current() (ts.Datapoint, time0.Unit, ts.Annotation) { +func (m *MockSeriesIteratorAccumulator) Current() (ts.Datapoint, time.Unit, ts.Annotation) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Current") ret0, _ := ret[0].(ts.Datapoint) - ret1, _ := ret[1].(time0.Unit) + ret1, _ := ret[1].(time.Unit) ret2, _ := ret[2].(ts.Annotation) return ret0, ret1, ret2 } @@ -957,10 +935,10 @@ func (mr *MockSeriesIteratorAccumulatorMockRecorder) Namespace() *gomock.Call { } // Start mocks base method -func (m *MockSeriesIteratorAccumulator) Start() time.Time { +func (m *MockSeriesIteratorAccumulator) Start() time0.Time { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Start") - ret0, _ := ret[0].(time.Time) + ret0, _ := ret[0].(time0.Time) return ret0 } @@ -971,10 +949,10 @@ func (mr *MockSeriesIteratorAccumulatorMockRecorder) Start() *gomock.Call { } // End mocks base method -func (m *MockSeriesIteratorAccumulator) End() time.Time { +func (m *MockSeriesIteratorAccumulator) End() time0.Time { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "End") - ret0, _ := ret[0].(time.Time) + ret0, _ := ret[0].(time0.Time) return ret0 } @@ -1104,11 +1082,11 @@ func (mr 
*MockSeriesIteratorMockRecorder) Next() *gomock.Call { } // Current mocks base method -func (m *MockSeriesIterator) Current() (ts.Datapoint, time0.Unit, ts.Annotation) { +func (m *MockSeriesIterator) Current() (ts.Datapoint, time.Unit, ts.Annotation) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Current") ret0, _ := ret[0].(ts.Datapoint) - ret1, _ := ret[1].(time0.Unit) + ret1, _ := ret[1].(time.Unit) ret2, _ := ret[2].(ts.Annotation) return ret0, ret1, ret2 } @@ -1174,10 +1152,10 @@ func (mr *MockSeriesIteratorMockRecorder) Namespace() *gomock.Call { } // Start mocks base method -func (m *MockSeriesIterator) Start() time.Time { +func (m *MockSeriesIterator) Start() time0.Time { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Start") - ret0, _ := ret[0].(time.Time) + ret0, _ := ret[0].(time0.Time) return ret0 } @@ -1188,10 +1166,10 @@ func (mr *MockSeriesIteratorMockRecorder) Start() *gomock.Call { } // End mocks base method -func (m *MockSeriesIterator) End() time.Time { +func (m *MockSeriesIterator) End() time0.Time { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "End") - ret0, _ := ret[0].(time.Time) + ret0, _ := ret[0].(time0.Time) return ret0 } diff --git a/src/dbnode/generated-source-files.mk b/src/dbnode/generated-source-files.mk index fc1c7d29be..400a7c68c4 100644 --- a/src/dbnode/generated-source-files.mk +++ b/src/dbnode/generated-source-files.mk @@ -302,7 +302,7 @@ genny-list-all: \ genny-list-storage-id: cd $(m3x_package_path) && make genny-pooled-elem-list-gen \ pkg=storage \ - value_type=ident.ID \ + value_type=doc.Document \ rename_type_prefix=id \ rename_type_middle=ID \ target_package=github.com/m3db/m3/src/dbnode/storage diff --git a/src/dbnode/persist/fs/commitlog/reader.go b/src/dbnode/persist/fs/commitlog/reader.go index 763f0aacf7..4e2fe77938 100644 --- a/src/dbnode/persist/fs/commitlog/reader.go +++ b/src/dbnode/persist/fs/commitlog/reader.go @@ -288,33 +288,19 @@ func (r *reader) seriesMetadataForEntry( // Find or allocate the namespace ID. namespaceID := r.namespaceIDReused(decoded.Namespace) - var ( - idPool = r.opts.commitLogOptions.IdentifierPool() - tags ident.Tags - tagBytesLen = len(decoded.EncodedTags) - ) - if tagBytesLen != 0 { - r.tagDecoderCheckedBytes.Reset(decoded.EncodedTags) - r.tagDecoder.Reset(r.tagDecoderCheckedBytes) - - tags = idPool.Tags() - for r.tagDecoder.Next() { - curr := r.tagDecoder.Current() - tags.Append(idPool.CloneTag(curr)) - } - err = r.tagDecoder.Err() - if err != nil { - return ts.Series{}, err - } - } + // Need to copy encoded tags since will be invalid when + // progressing to next record. + encodedTags := append( + make([]byte, 0, len(decoded.EncodedTags)), + decoded.EncodedTags...) 
- seriesID := idPool.BinaryID(id) + idPool := r.opts.commitLogOptions.IdentifierPool() metadata = ts.Series{ UniqueIndex: entry.Index, - ID: seriesID, + ID: idPool.BinaryID(id), Namespace: namespaceID, Shard: decoded.Shard, - Tags: tags, + EncodedTags: ts.EncodedTags(encodedTags), } r.metadataLookup[entry.Index] = metadata diff --git a/src/dbnode/persist/fs/commitlog/writer.go b/src/dbnode/persist/fs/commitlog/writer.go index 8a186bfe2b..864184e533 100644 --- a/src/dbnode/persist/fs/commitlog/writer.go +++ b/src/dbnode/persist/fs/commitlog/writer.go @@ -35,9 +35,7 @@ import ( "github.com/m3db/m3/src/dbnode/persist/fs/msgpack" "github.com/m3db/m3/src/dbnode/persist/schema" "github.com/m3db/m3/src/dbnode/ts" - "github.com/m3db/m3/src/x/ident" xos "github.com/m3db/m3/src/x/os" - "github.com/m3db/m3/src/x/serialize" xtime "github.com/m3db/m3/src/x/time" ) @@ -109,8 +107,6 @@ type writer struct { logEncoder *msgpack.Encoder logEncoderBuff []byte metadataEncoderBuff []byte - tagEncoder serialize.TagEncoder - tagSliceIter ident.TagsIterator opts Options } @@ -133,8 +129,6 @@ func newCommitLogWriter( logEncoder: msgpack.NewEncoder(), logEncoderBuff: make([]byte, 0, defaultEncoderBuffSize), metadataEncoderBuff: make([]byte, 0, defaultEncoderBuffSize), - tagEncoder: opts.FilesystemOptions().TagEncoderPool().Get(), - tagSliceIter: ident.NewTagsIterator(ident.Tags{}), opts: opts, } } @@ -203,34 +197,13 @@ func (w *writer) Write( seen := w.seen.Test(uint(series.UniqueIndex)) if !seen { - var encodedTags []byte - if series.EncodedTags != nil { - // If already serialized use the serialized tags. - encodedTags = series.EncodedTags - } else if series.Tags.Values() != nil { - // Otherwise serialize the tags. - w.tagSliceIter.Reset(series.Tags) - w.tagEncoder.Reset() - err := w.tagEncoder.Encode(w.tagSliceIter) - if err != nil { - return err - } - - encodedTagsChecked, ok := w.tagEncoder.Data() - if !ok { - return errTagEncoderDataNotAvailable - } - - encodedTags = encodedTagsChecked.Bytes() - } - // If "idx" likely hasn't been written to commit log // yet we need to include series metadata var metadata schema.LogMetadata metadata.ID = series.ID.Bytes() metadata.Namespace = series.Namespace.Bytes() metadata.Shard = series.Shard - metadata.EncodedTags = encodedTags + metadata.EncodedTags = series.EncodedTags var err error w.metadataEncoderBuff, err = msgpack.EncodeLogMetadataFast(w.metadataEncoderBuff[:0], metadata) diff --git a/src/dbnode/persist/fs/fs_mock.go b/src/dbnode/persist/fs/fs_mock.go index 635cc05875..1ca3c03f55 100644 --- a/src/dbnode/persist/fs/fs_mock.go +++ b/src/dbnode/persist/fs/fs_mock.go @@ -1,26 +1,6 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/m3db/m3/src/dbnode/persist/fs (interfaces: DataFileSetWriter,DataFileSetReader,DataFileSetSeeker,IndexFileSetWriter,IndexFileSetReader,IndexSegmentFileSetWriter,IndexSegmentFileSet,IndexSegmentFile,SnapshotMetadataFileWriter,DataFileSetSeekerManager,ConcurrentDataFileSetSeeker,MergeWith) -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Package fs is a generated GoMock package. package fs @@ -109,31 +89,31 @@ func (mr *MockDataFileSetWriterMockRecorder) Open(arg0 interface{}) *gomock.Call } // Write mocks base method -func (m *MockDataFileSetWriter) Write(arg0 ident.ID, arg1 ident.Tags, arg2 checked.Bytes, arg3 uint32) error { +func (m *MockDataFileSetWriter) Write(arg0 persist.Metadata, arg1 checked.Bytes, arg2 uint32) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Write", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "Write", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // Write indicates an expected call of Write -func (mr *MockDataFileSetWriterMockRecorder) Write(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockDataFileSetWriterMockRecorder) Write(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockDataFileSetWriter)(nil).Write), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockDataFileSetWriter)(nil).Write), arg0, arg1, arg2) } // WriteAll mocks base method -func (m *MockDataFileSetWriter) WriteAll(arg0 ident.ID, arg1 ident.Tags, arg2 []checked.Bytes, arg3 uint32) error { +func (m *MockDataFileSetWriter) WriteAll(arg0 persist.Metadata, arg1 []checked.Bytes, arg2 uint32) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteAll", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "WriteAll", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // WriteAll indicates an expected call of WriteAll -func (mr *MockDataFileSetWriterMockRecorder) WriteAll(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockDataFileSetWriterMockRecorder) WriteAll(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteAll", reflect.TypeOf((*MockDataFileSetWriter)(nil).WriteAll), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteAll", reflect.TypeOf((*MockDataFileSetWriter)(nil).WriteAll), arg0, arg1, arg2) } // MockDataFileSetReader is a mock of DataFileSetReader interface diff --git a/src/dbnode/persist/fs/merger.go b/src/dbnode/persist/fs/merger.go index 1b5aeb2736..4ffed8330d 100644 --- a/src/dbnode/persist/fs/merger.go +++ b/src/dbnode/persist/fs/merger.go @@ -28,9 +28,9 @@ import ( "github.com/m3db/m3/src/dbnode/namespace" 
"github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/storage/block" - "github.com/m3db/m3/src/dbnode/storage/index/convert" "github.com/m3db/m3/src/dbnode/ts" "github.com/m3db/m3/src/dbnode/x/xio" + "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/x/checked" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" @@ -95,7 +95,6 @@ func (m *merger) Merge( blockAllocSize = m.blockAllocSize srPool = m.srPool multiIterPool = m.multiIterPool - identPool = m.identPool encoderPool = m.encoderPool nsOpts = m.nsOpts @@ -158,19 +157,6 @@ func (m *merger) Merge( multiIter = multiIterPool.Get() ctx = m.contextPool.Get() - // We keep track of IDs/tags to finalize at the end of merging. This - // only applies to those that come from disk Reads, since the whole - // lifecycle of those IDs/tags are contained to this function. We don't - // want finalize the IDs from memory since other components may have - // ownership over it. - // - // We must only finalize these at the end of this function, since the - // flush preparer's underlying writer holds on to those references - // until it is closed (closing the PreparedDataPersist at the end of - // this merge closes the underlying writer). - idsToFinalize = make([]ident.ID, 0, reader.Entries()) - tagsToFinalize = make([]ident.Tags, 0, reader.Entries()) - // Shared between iterations. iterResources = newIterResources( multiIter, @@ -183,12 +169,6 @@ func (m *merger) Merge( defer func() { segReader.Finalize() multiIter.Close() - for _, res := range idsToFinalize { - res.Finalize() - } - for _, res := range tagsToFinalize { - res.Finalize() - } }() // The merge is performed in two stages. The first stage is to loop through @@ -203,7 +183,6 @@ func (m *merger) Merge( if err != nil { return closer, err } - idsToFinalize = append(idsToFinalize, id) segmentReaders = segmentReaders[:0] seg := segmentReaderFromData(data, checksum, segReader) @@ -219,14 +198,13 @@ func (m *merger) Merge( segmentReaders = appendBlockReadersToSegmentReaders(segmentReaders, mergeWithData) } - // tagsIter is never nil. These tags will be valid as long as the IDs - // are valid, and the IDs are valid for the duration of the file writing. - tags, err := convert.TagsFromTagsIter(id, tagsIter, identPool) - tagsIter.Close() - if err != nil { - return closer, err - } - tagsToFinalize = append(tagsToFinalize, tags) + // Inform the writer to finalize the ID and tag iterator once + // the volume is written. 
+ metadata := persist.NewMetadataFromIDAndTagIterator(id, tagsIter, + persist.MetadataOptions{ + FinalizeID: true, + FinalizeTagIterator: true, + }) // In the special (but common) case that we're just copying the series data from the old file // into the new one without merging or adding any additional data we can avoid recalculating @@ -237,11 +215,11 @@ func (m *merger) Merge( return closer, err } - if err := persistSegmentWithChecksum(id, tags, segment, checksum, prepared.Persist); err != nil { + if err := persistSegmentWithChecksum(metadata, segment, checksum, prepared.Persist); err != nil { return closer, err } } else { - if err := persistSegmentReaders(id, tags, segmentReaders, iterResources, prepared.Persist); err != nil { + if err := persistSegmentReaders(metadata, segmentReaders, iterResources, prepared.Persist); err != nil { return closer, err } } @@ -256,18 +234,19 @@ func (m *merger) Merge( ctx.Reset() err = mergeWith.ForEachRemaining( ctx, blockStart, - func(id ident.ID, tags ident.Tags, mergeWithData block.FetchBlockResult) error { + func(seriesMetadata doc.Document, mergeWithData block.FetchBlockResult) error { segmentReaders = segmentReaders[:0] segmentReaders = appendBlockReadersToSegmentReaders(segmentReaders, mergeWithData.Blocks) - err := persistSegmentReaders(id, tags, segmentReaders, iterResources, prepared.Persist) + + metadata := persist.NewMetadata(seriesMetadata) + err := persistSegmentReaders(metadata, segmentReaders, iterResources, prepared.Persist) if err == nil { err = onFlush.OnFlushNewSeries(persist.OnFlushNewSeriesEvent{ - Shard: shard, - BlockStart: startTime, - FirstWrite: mergeWithData.FirstWrite, - ID: id, - Tags: tags, + Shard: shard, + BlockStart: startTime, + FirstWrite: mergeWithData.FirstWrite, + SeriesMetadata: seriesMetadata, }) } @@ -305,8 +284,7 @@ func segmentReaderFromData( } func persistSegmentReaders( - id ident.ID, - tags ident.Tags, + metadata persist.Metadata, segReaders []xio.SegmentReader, ir iterResources, persistFn persist.DataFn, @@ -316,15 +294,14 @@ func persistSegmentReaders( } if len(segReaders) == 1 { - return persistSegmentReader(id, tags, segReaders[0], persistFn) + return persistSegmentReader(metadata, segReaders[0], persistFn) } - return persistIter(id, tags, segReaders, ir, persistFn) + return persistIter(metadata, segReaders, ir, persistFn) } func persistIter( - id ident.ID, - tags ident.Tags, + metadata persist.Metadata, segReaders []xio.SegmentReader, ir iterResources, persistFn persist.DataFn, @@ -345,12 +322,11 @@ func persistIter( } segment := encoder.Discard() - return persistSegment(id, tags, segment, persistFn) + return persistSegment(metadata, segment, persistFn) } func persistSegmentReader( - id ident.ID, - tags ident.Tags, + metadata persist.Metadata, segmentReader xio.SegmentReader, persistFn persist.DataFn, ) error { @@ -358,27 +334,25 @@ func persistSegmentReader( if err != nil { return err } - return persistSegment(id, tags, segment, persistFn) + return persistSegment(metadata, segment, persistFn) } func persistSegment( - id ident.ID, - tags ident.Tags, + metadata persist.Metadata, segment ts.Segment, persistFn persist.DataFn, ) error { checksum := segment.CalculateChecksum() - return persistFn(id, tags, segment, checksum) + return persistFn(metadata, segment, checksum) } func persistSegmentWithChecksum( - id ident.ID, - tags ident.Tags, + metadata persist.Metadata, segment ts.Segment, checksum uint32, persistFn persist.DataFn, ) error { - return persistFn(id, tags, segment, checksum) + return 
persistFn(metadata, segment, checksum) } type iterResources struct { diff --git a/src/dbnode/persist/fs/persist_manager.go b/src/dbnode/persist/fs/persist_manager.go index 05a32d9799..867f8fea88 100644 --- a/src/dbnode/persist/fs/persist_manager.go +++ b/src/dbnode/persist/fs/persist_manager.go @@ -36,7 +36,6 @@ import ( m3ninxpersist "github.com/m3db/m3/src/m3ninx/persist" "github.com/m3db/m3/src/x/checked" xclose "github.com/m3db/m3/src/x/close" - "github.com/m3db/m3/src/x/ident" "github.com/m3db/m3/src/x/instrument" "github.com/pborman/uuid" @@ -496,8 +495,7 @@ func (pm *persistManager) PrepareData(opts persist.DataPrepareOptions) (persist. } func (pm *persistManager) persist( - id ident.ID, - tags ident.Tags, + metadata persist.Metadata, segment ts.Segment, checksum uint32, ) error { @@ -529,7 +527,7 @@ func (pm *persistManager) persist( pm.dataPM.segmentHolder[0] = segment.Head pm.dataPM.segmentHolder[1] = segment.Tail - err := pm.dataPM.writer.WriteAll(id, tags, pm.dataPM.segmentHolder, checksum) + err := pm.dataPM.writer.WriteAll(metadata, pm.dataPM.segmentHolder, checksum) pm.count++ pm.bytesWritten += int64(segment.Len()) diff --git a/src/dbnode/persist/fs/types.go b/src/dbnode/persist/fs/types.go index e17d453635..cded4b51dc 100644 --- a/src/dbnode/persist/fs/types.go +++ b/src/dbnode/persist/fs/types.go @@ -35,6 +35,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/block" "github.com/m3db/m3/src/dbnode/storage/bootstrap/result" "github.com/m3db/m3/src/dbnode/x/xio" + "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/m3ninx/index/segment/fst" idxpersist "github.com/m3db/m3/src/m3ninx/persist" "github.com/m3db/m3/src/x/checked" @@ -87,11 +88,11 @@ type DataFileSetWriter interface { // Write will write the id and data pair and returns an error on a write error. Callers // must not call this method with a given ID more than once. - Write(id ident.ID, tags ident.Tags, data checked.Bytes, checksum uint32) error + Write(metadata persist.Metadata, data checked.Bytes, checksum uint32) error // WriteAll will write the id and all byte slices and returns an error on a write error. // Callers must not call this method with a given ID more than once. - WriteAll(id ident.ID, tags ident.Tags, data []checked.Bytes, checksum uint32) error + WriteAll(metadata persist.Metadata, data []checked.Bytes, checksum uint32) error // DeferClose returns a DataCloser that defers writing of a checkpoint file. DeferClose() (persist.DataCloser, error) @@ -527,7 +528,7 @@ type BlockRetrieverOptions interface { // ForEachRemainingFn is the function that is run on each of the remaining // series of the merge target that did not intersect with the fileset. -type ForEachRemainingFn func(seriesID ident.ID, tags ident.Tags, data block.FetchBlockResult) error +type ForEachRemainingFn func(seriesMetadata doc.Document, data block.FetchBlockResult) error // MergeWith is an interface that the fs merger uses to merge data with. 
type MergeWith interface { diff --git a/src/dbnode/persist/fs/write.go b/src/dbnode/persist/fs/write.go index df4e70a8a7..5d9dc42f0c 100644 --- a/src/dbnode/persist/fs/write.go +++ b/src/dbnode/persist/fs/write.go @@ -29,13 +29,14 @@ import ( "sort" "time" + "github.com/m3db/m3/src/x/ident" + "github.com/m3db/bloom" "github.com/m3db/m3/src/dbnode/digest" "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/persist/fs/msgpack" "github.com/m3db/m3/src/dbnode/persist/schema" "github.com/m3db/m3/src/x/checked" - "github.com/m3db/m3/src/x/ident" "github.com/m3db/m3/src/x/serialize" xtime "github.com/m3db/m3/src/x/time" @@ -81,14 +82,14 @@ type writer struct { encoder *msgpack.Encoder digestBuf digest.Buffer singleCheckedBytes []checked.Bytes + tagsIterator ident.TagsIterator tagEncoderPool serialize.TagEncoderPool err error } type indexEntry struct { index int64 - id ident.ID - tags ident.Tags + metadata persist.Metadata dataFileOffset int64 indexFileOffset int64 size uint32 @@ -98,7 +99,11 @@ type indexEntry struct { type indexEntries []indexEntry func (e indexEntries) releaseRefs() { - // memset zero loop optimization + // Close any metadata. + for _, elem := range e { + elem.metadata.Finalize() + } + // Apply memset zero loop optimization. var zeroed indexEntry for i := range e { e[i] = zeroed @@ -110,7 +115,7 @@ func (e indexEntries) Len() int { } func (e indexEntries) Less(i, j int) bool { - return bytes.Compare(e[i].id.Bytes(), e[j].id.Bytes()) < 0 + return bytes.Compare(e[i].metadata.BytesID(), e[j].metadata.BytesID()) < 0 } func (e indexEntries) Swap(i, j int) { @@ -139,6 +144,7 @@ func NewWriter(opts Options) (DataFileSetWriter, error) { encoder: msgpack.NewEncoder(), digestBuf: digest.NewBuffer(), singleCheckedBytes: make([]checked.Bytes, 1), + tagsIterator: ident.NewTagsIterator(ident.Tags{}), tagEncoderPool: opts.TagEncoderPool(), }, nil } @@ -252,18 +258,16 @@ func (w *writer) writeData(data []byte) error { } func (w *writer) Write( - id ident.ID, - tags ident.Tags, + metadata persist.Metadata, data checked.Bytes, checksum uint32, ) error { w.singleCheckedBytes[0] = data - return w.WriteAll(id, tags, w.singleCheckedBytes, checksum) + return w.WriteAll(metadata, w.singleCheckedBytes, checksum) } func (w *writer) WriteAll( - id ident.ID, - tags ident.Tags, + metadata persist.Metadata, data []checked.Bytes, checksum uint32, ) error { @@ -271,7 +275,7 @@ func (w *writer) WriteAll( return w.err } - if err := w.writeAll(id, tags, data, checksum); err != nil { + if err := w.writeAll(metadata, data, checksum); err != nil { w.err = err return err } @@ -279,8 +283,7 @@ func (w *writer) WriteAll( } func (w *writer) writeAll( - id ident.ID, - tags ident.Tags, + metadata persist.Metadata, data []checked.Bytes, checksum uint32, ) error { @@ -297,8 +300,7 @@ func (w *writer) writeAll( entry := indexEntry{ index: w.currIdx, - id: id, - tags: tags, + metadata: metadata, dataFileOffset: w.currOffset, size: uint32(size), checksum: checksum, @@ -443,32 +445,35 @@ func (w *writer) writeIndexFileContents( sort.Sort(w.indexEntries) var ( - offset int64 - prevID []byte - tagsIter = ident.NewTagsIterator(ident.Tags{}) + offset int64 + prevID []byte + opts = persist.MetadataTagIteratorOptions{ + ReuseableTagsIterator: w.tagsIterator, + } tagsEncoder = w.tagEncoderPool.Get() ) defer tagsEncoder.Finalize() for i := range w.indexEntries { - id := w.indexEntries[i].id.Bytes() + id := w.indexEntries[i].metadata.BytesID() // Need to check if i > 0 or we can never write an empty string ID if i > 
0 && bytes.Equal(id, prevID) { // Should never happen, Write() should only be called once per ID return fmt.Errorf("encountered duplicate ID: %s", id) } - var encodedTags []byte - if tags := w.indexEntries[i].tags; tags.Values() != nil { - tagsIter.Reset(tags) - tagsEncoder.Reset() - if err := tagsEncoder.Encode(tagsIter); err != nil { - return err - } - data, ok := tagsEncoder.Data() - if !ok { - return errWriterEncodeTagsDataNotAccessible - } - encodedTags = data.Bytes() + tagsIter, err := w.indexEntries[i].metadata.TagIterator(opts) + if err != nil { + return err + } + + tagsEncoder.Reset() + if err := tagsEncoder.Encode(tagsIter); err != nil { + return err + } + + encodedTags, ok := tagsEncoder.Data() + if !ok { + return errWriterEncodeTagsDataNotAccessible } entry := schema.IndexEntry{ @@ -477,7 +482,7 @@ func (w *writer) writeIndexFileContents( Size: int64(w.indexEntries[i].size), Offset: w.indexEntries[i].dataFileOffset, Checksum: int64(w.indexEntries[i].checksum), - EncodedTags: encodedTags, + EncodedTags: encodedTags.Bytes(), } w.encoder.Reset() @@ -520,7 +525,7 @@ func (w *writer) writeSummariesFileContents( summary := schema.IndexSummary{ Index: w.indexEntries[i].index, - ID: w.indexEntries[i].id.Bytes(), + ID: w.indexEntries[i].metadata.BytesID(), IndexEntryOffset: w.indexEntries[i].indexFileOffset, } diff --git a/src/dbnode/persist/persist_mock.go b/src/dbnode/persist/persist_mock.go index 1ec52dcd02..a9bf749a9f 100644 --- a/src/dbnode/persist/persist_mock.go +++ b/src/dbnode/persist/persist_mock.go @@ -1,34 +1,13 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/m3db/m3/src/dbnode/persist/types.go - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Source: /Users/r/go/src/github.com/m3db/m3/src/dbnode/persist/types.go // Package persist is a generated GoMock package. 
package persist import ( - "reflect" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" + gomock "github.com/golang/mock/gomock" + uuid "github.com/pborman/uuid" + reflect "reflect" ) // MockManager is a mock of Manager interface diff --git a/src/dbnode/persist/types.go b/src/dbnode/persist/types.go index 932d256909..0e065b154d 100644 --- a/src/dbnode/persist/types.go +++ b/src/dbnode/persist/types.go @@ -21,11 +21,13 @@ package persist import ( + "errors" "fmt" "time" "github.com/m3db/m3/src/dbnode/namespace" "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/m3ninx/index/segment" idxpersist "github.com/m3db/m3/src/m3ninx/persist" "github.com/m3db/m3/src/x/ident" @@ -33,8 +35,119 @@ import ( "github.com/pborman/uuid" ) +var ( + errReuseableTagIteratorRequired = errors.New("reuseable tags iterator is required") +) + +// Metadata is metadata for a time series, it can +// have several underlying sources. +type Metadata struct { + metadata doc.Document + id ident.ID + tags ident.Tags + tagsIter ident.TagIterator + opts MetadataOptions +} + +// MetadataOptions is options to use when creating metadata. +type MetadataOptions struct { + FinalizeID bool + FinalizeTags bool + FinalizeTagIterator bool +} + +// NewMetadata returns a new metadata struct from series metadata. +func NewMetadata(metadata doc.Document) Metadata { + return Metadata{metadata: metadata} +} + +// NewMetadataFromIDAndTags returns a new metadata struct from +// explicit ID and tags. +func NewMetadataFromIDAndTags( + id ident.ID, + tags ident.Tags, + opts MetadataOptions, +) Metadata { + return Metadata{ + id: id, + tags: tags, + opts: opts, + } +} + +// NewMetadataFromIDAndTagIterator returns a new metadata struct from +// explicit ID and tag iterator. +func NewMetadataFromIDAndTagIterator( + id ident.ID, + tagsIter ident.TagIterator, + opts MetadataOptions, +) Metadata { + return Metadata{ + id: id, + tagsIter: tagsIter, + opts: opts, + } +} + +// BytesID returns the bytes ID of the series. +func (m Metadata) BytesID() []byte { + if m.id != nil { + return m.id.Bytes() + } + return m.metadata.ID +} + +// MetadataTagIteratorOptions are options required to be passed +// to metadata TagIterator to retrieve a tag iterator. +type MetadataTagIteratorOptions struct { + ReuseableTagsIterator ident.TagsIterator +} + +// TagIterator returns a tag iterator for the series, +// returning a direct ref to a provided tag iterator +// or using the reuseable tag iterator provided by the +// callsite if it needs to iterate over tags or fields. +func (m Metadata) TagIterator( + opts MetadataTagIteratorOptions, +) (ident.TagIterator, error) { + reuseable := opts.ReuseableTagsIterator + if reuseable == nil { + // Always check to make sure callsites won't + // get a bad allocation pattern of having + // to create one here inline if the metadata + // they are passing in suddenly changes from + // tagsIter to tags or fields with metadata. + return nil, errReuseableTagIteratorRequired + } + if m.tagsIter != nil { + return m.tagsIter, nil + } + + if len(m.tags.Values()) > 0 { + reuseable.Reset(m.tags) + return reuseable, nil + } + + reuseable.ResetFields(m.metadata.Fields) + return reuseable, nil +} + +// Finalize will finalize any resources that requested +// to be finalized. 
+func (m Metadata) Finalize() { + if m.opts.FinalizeID { + m.id.Finalize() + } + if m.opts.FinalizeTags { + m.tags.Finalize() + } + if m.opts.FinalizeTagIterator { + m.tagsIter.Close() + } +} + // DataFn is a function that persists a m3db segment for a given ID. -type DataFn func(id ident.ID, tags ident.Tags, segment ts.Segment, checksum uint32) error +type DataFn func(metadata Metadata, segment ts.Segment, checksum uint32) error // DataCloser is a function that performs cleanup after persisting the data // blocks for a (shard, blockStart) combination. @@ -213,11 +326,10 @@ const ( // OnFlushNewSeriesEvent is the fields related to a flush of a new series. type OnFlushNewSeriesEvent struct { - Shard uint32 - BlockStart time.Time - FirstWrite time.Time - ID ident.ID - Tags ident.Tags + Shard uint32 + BlockStart time.Time + FirstWrite time.Time + SeriesMetadata doc.Document } // OnFlushSeries performs work on a per series level. diff --git a/src/dbnode/storage/bootstrap/bootstrap_mock.go b/src/dbnode/storage/bootstrap/bootstrap_mock.go index f3e2297922..81e1493955 100644 --- a/src/dbnode/storage/bootstrap/bootstrap_mock.go +++ b/src/dbnode/storage/bootstrap/bootstrap_mock.go @@ -1,40 +1,18 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/m3db/m3/src/dbnode/storage/bootstrap/types.go - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Source: /Users/r/go/src/github.com/m3db/m3/src/dbnode/storage/bootstrap/types.go // Package bootstrap is a generated GoMock package. 
package bootstrap import ( - "reflect" - "time" - - "github.com/m3db/m3/src/dbnode/namespace" - "github.com/m3db/m3/src/dbnode/storage/bootstrap/result" - "github.com/m3db/m3/src/dbnode/topology" - "github.com/m3db/m3/src/x/context" - "github.com/m3db/m3/src/x/ident" - - "github.com/golang/mock/gomock" + gomock "github.com/golang/mock/gomock" + namespace "github.com/m3db/m3/src/dbnode/namespace" + result "github.com/m3db/m3/src/dbnode/storage/bootstrap/result" + topology "github.com/m3db/m3/src/dbnode/topology" + context "github.com/m3db/m3/src/x/context" + ident "github.com/m3db/m3/src/x/ident" + reflect "reflect" + time "time" ) // MockProcessProvider is a mock of ProcessProvider interface diff --git a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source.go b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source.go index 90181c67a8..f95bad14b6 100644 --- a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source.go +++ b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source.go @@ -435,21 +435,6 @@ func (s *commitLogSource) Read( // Resolve the series in the accumulator. accumulator := ns.accumulator - // NB(r): Make sure that only series.EncodedTags are used and not - // series.Tags (we explicitly ask for references to be returned and to - // avoid decoding the tags if we don't have to). - if decodedTags := len(entry.Series.Tags.Values()); decodedTags > 0 { - msg := "commit log reader expects encoded tags" - instrumentOpts := s.opts.ResultOptions().InstrumentOptions() - instrument.EmitAndLogInvariantViolation(instrumentOpts, func(l *zap.Logger) { - l.Error(msg, - zap.Int("decodedTags", decodedTags), - zap.Int("encodedTags", len(entry.Series.EncodedTags))) - }) - err := instrument.InvariantErrorf(fmt.Sprintf("%s: decoded=%d", msg, decodedTags)) - return bootstrap.NamespaceResults{}, err - } - var tagIter ident.TagIterator if len(entry.Series.EncodedTags) > 0 { tagDecoderCheckedBytes.Reset(entry.Series.EncodedTags) diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go index be7f8de4f8..9e6dfab1bc 100644 --- a/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go +++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go @@ -652,7 +652,7 @@ func (s *fileSystemSource) readNextEntryAndMaybeIndex( return batch, err } - d, err := convert.FromMetricIter(id, tagsIter) + d, err := convert.FromSeriesIDAndTagIter(id, tagsIter) // Finalize the ID and tags. id.Finalize() tagsIter.Close() diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go index 423596a495..67ebb99eeb 100644 --- a/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go +++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go @@ -572,7 +572,9 @@ func (s *peersSource) flush( break } - err = prepared.Persist(s.ID, s.Tags, segment, checksum) + metadata := persist.NewMetadataFromIDAndTags(s.ID, s.Tags, + persist.MetadataOptions{}) + err = prepared.Persist(metadata, segment, checksum) flushCtx.BlockingCloseReset() if err != nil { blockErr = err // Need to call prepared.Close, avoid return @@ -707,7 +709,7 @@ func (s *peersSource) readNextEntryAndMaybeIndex( return batch, err } - d, err := convert.FromMetricIter(id, tagsIter) + d, err := convert.FromSeriesIDAndTagIter(id, tagsIter) // Finalize the ID and tags. 
id.Finalize() tagsIter.Close() @@ -936,7 +938,7 @@ func (s *peersSource) readBlockMetadataAndIndex( batch []doc.Document, flushBatch func() error, ) (bool, error) { - d, err := convert.FromMetric(dataBlock.ID, dataBlock.Tags) + d, err := convert.FromSeriesIDAndTags(dataBlock.ID, dataBlock.Tags) if err != nil { return false, err } diff --git a/src/dbnode/storage/database_test.go b/src/dbnode/storage/database_test.go index d58be5825f..1b06cc6725 100644 --- a/src/dbnode/storage/database_test.go +++ b/src/dbnode/storage/database_test.go @@ -795,7 +795,6 @@ func testDatabaseNamespaceIndexFunctions(t *testing.T, commitlogEnabled bool) { tagsIter = ident.EmptyTagIterator s = ts.Series{ ID: id, - Tags: ident.Tags{}, Namespace: namespace, } ) @@ -1060,7 +1059,6 @@ func testDatabaseWriteBatch(t *testing.T, ts.Series{ ID: ident.StringID(write.series + "-updated"), Namespace: namespace, - Tags: ident.Tags{}, }, wasWritten, write.err) } else { batchWriter.Add(i*2, ident.StringID(write.series), @@ -1071,7 +1069,6 @@ func testDatabaseWriteBatch(t *testing.T, ts.Series{ ID: ident.StringID(write.series + "-updated"), Namespace: namespace, - Tags: ident.Tags{}, }, wasWritten, write.err) } i++ diff --git a/src/dbnode/storage/dirty_series_new_map_gen.go b/src/dbnode/storage/dirty_series_new_map_gen.go index 02e8cd5d2f..72721800fc 100644 --- a/src/dbnode/storage/dirty_series_new_map_gen.go +++ b/src/dbnode/storage/dirty_series_new_map_gen.go @@ -25,8 +25,7 @@ package storage import ( - "github.com/m3db/m3/src/x/ident" - "github.com/m3db/m3/src/x/pool" + "bytes" "github.com/cespare/xxhash/v2" ) @@ -34,51 +33,26 @@ import ( // dirtySeriesMapOptions provides options used when created the map. type dirtySeriesMapOptions struct { InitialSize int - KeyCopyPool pool.BytesPool } // newDirtySeriesMap returns a new byte keyed map. 
func newDirtySeriesMap(opts dirtySeriesMapOptions) *dirtySeriesMap { - var ( - copyFn dirtySeriesMapCopyFn - finalizeFn dirtySeriesMapFinalizeFn - ) - if pool := opts.KeyCopyPool; pool == nil { - copyFn = func(k idAndBlockStart) idAndBlockStart { - return idAndBlockStart{ - id: ident.BytesID(append([]byte(nil), k.id.Bytes()...)), - blockStart: k.blockStart, - } - } - } else { - copyFn = func(k idAndBlockStart) idAndBlockStart { - bytes := k.id.Bytes() - keyLen := len(bytes) - pooled := pool.Get(keyLen)[:keyLen] - copy(pooled, bytes) - return idAndBlockStart{ - id: ident.BytesID(pooled), - blockStart: k.blockStart, - } - } - finalizeFn = func(k idAndBlockStart) { - if slice, ok := k.id.(ident.BytesID); ok { - pool.Put(slice) - } - } - } return _dirtySeriesMapAlloc(_dirtySeriesMapOptions{ hash: func(k idAndBlockStart) dirtySeriesMapHash { hash := uint64(7) - hash = 31*hash + xxhash.Sum64(k.id.Bytes()) + hash = 31*hash + xxhash.Sum64(k.id) hash = 31*hash + uint64(k.blockStart) return dirtySeriesMapHash(hash) }, equals: func(x, y idAndBlockStart) bool { - return x.id.Equal(y.id) && x.blockStart == y.blockStart + return bytes.Equal(x.id, y.id) && x.blockStart == y.blockStart + }, + copy: func(k idAndBlockStart) idAndBlockStart { + return idAndBlockStart{ + id: k.id, + blockStart: k.blockStart, + } }, - copy: copyFn, - finalize: finalizeFn, initialSize: opts.InitialSize, }) } diff --git a/src/dbnode/storage/fs_merge_with_mem.go b/src/dbnode/storage/fs_merge_with_mem.go index 98e307afe0..ab409c1d17 100644 --- a/src/dbnode/storage/fs_merge_with_mem.go +++ b/src/dbnode/storage/fs_merge_with_mem.go @@ -42,6 +42,7 @@ type fsMergeWithMem struct { retriever series.QueryableBlockRetriever dirtySeries *dirtySeriesMap dirtySeriesToWrite map[xtime.UnixNano]*idList + reuseableID *ident.ReuseableBytesID } func newFSMergeWithMem( @@ -55,6 +56,7 @@ func newFSMergeWithMem( retriever: retriever, dirtySeries: dirtySeries, dirtySeriesToWrite: dirtySeriesToWrite, + reuseableID: ident.NewReuseableBytesID(), } } @@ -65,7 +67,10 @@ func (m *fsMergeWithMem) Read( nsCtx namespace.Context, ) ([]xio.BlockReader, bool, error) { // Check if this series is in memory (and thus requires merging). - element, exists := m.dirtySeries.Get(idAndBlockStart{blockStart: blockStart, id: seriesID}) + element, exists := m.dirtySeries.Get(idAndBlockStart{ + blockStart: blockStart, + id: seriesID.Bytes(), + }) if !exists { return nil, false, nil } @@ -76,7 +81,7 @@ func (m *fsMergeWithMem) Read( // it. m.dirtySeriesToWrite[blockStart].Remove(element) - result, ok, err := m.fetchBlocks(ctx, element.Value, blockStart, nsCtx) + result, ok, err := m.fetchBlocks(ctx, seriesID, blockStart, nsCtx) if err != nil { return nil, false, err } @@ -118,33 +123,19 @@ func (m *fsMergeWithMem) ForEachRemaining( fn fs.ForEachRemainingFn, nsCtx namespace.Context, ) error { + reuseableID := m.reuseableID seriesList := m.dirtySeriesToWrite[blockStart] for seriesElement := seriesList.Front(); seriesElement != nil; seriesElement = seriesElement.Next() { - seriesID := seriesElement.Value - - // TODO(r): We should really not be looking this up per series element - // and just keep it in the linked list next to the series ID. 
- tags, ok, err := m.shard.TagsFromSeriesID(seriesID) + seriesMetadata := seriesElement.Value + reuseableID.Reset(seriesMetadata.ID) + mergeWithData, hasData, err := m.fetchBlocks(ctx, reuseableID, blockStart, nsCtx) if err != nil { return err } - if !ok { - // Receiving not ok means that the series was not found, for some - // reason like it falling out of retention, therefore we skip this - // series and continue. - // TODO(r): This should actually be an invariant error - these should not - // be evicted until a flush otherwise the durability guarantee was not - // upheld. - continue - } - mergeWithData, hasData, err := m.fetchBlocks(ctx, seriesID, blockStart, nsCtx) - if err != nil { - return err - } if hasData { - err = fn(seriesID, tags, mergeWithData) + err = fn(seriesMetadata, mergeWithData) if err != nil { return err } diff --git a/src/dbnode/storage/fs_merge_with_mem_test.go b/src/dbnode/storage/fs_merge_with_mem_test.go index 4765484e3e..de8f599bb9 100644 --- a/src/dbnode/storage/fs_merge_with_mem_test.go +++ b/src/dbnode/storage/fs_merge_with_mem_test.go @@ -28,6 +28,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/block" "github.com/m3db/m3/src/dbnode/storage/series" "github.com/m3db/m3/src/dbnode/x/xio" + "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" xtime "github.com/m3db/m3/src/x/time" @@ -82,7 +83,10 @@ func TestRead(t *testing.T) { mergeWith := newFSMergeWithMem(shard, retriever, dirtySeries, dirtySeriesToWrite) for _, d := range data { - require.True(t, dirtySeries.Contains(idAndBlockStart{blockStart: d.start, id: d.id})) + require.True(t, dirtySeries.Contains(idAndBlockStart{ + blockStart: d.start, + id: d.id.Bytes(), + })) beforeLen := dirtySeriesToWrite[d.start].Len() res, exists, err := mergeWith.Read(ctx, d.id, d.start, nsCtx) require.NoError(t, err) @@ -167,21 +171,20 @@ func TestForEachRemaining(t *testing.T) { mergeWith := newFSMergeWithMem(shard, retriever, dirtySeries, dirtySeriesToWrite) - var forEachCalls []ident.ID - shard.EXPECT().TagsFromSeriesID(gomock.Any()).Return(ident.Tags{}, true, nil).Times(2) + var forEachCalls []doc.Document shard.EXPECT(). FetchBlocksForColdFlush(gomock.Any(), id0, xtime.UnixNano(0).ToTime(), version+1, gomock.Any()). Return(result, nil) shard.EXPECT(). FetchBlocksForColdFlush(gomock.Any(), id1, xtime.UnixNano(0).ToTime(), version+1, gomock.Any()). Return(result, nil) - mergeWith.ForEachRemaining(ctx, 0, func(seriesID ident.ID, tags ident.Tags, result block.FetchBlockResult) error { - forEachCalls = append(forEachCalls, seriesID) + mergeWith.ForEachRemaining(ctx, 0, func(seriesMetadata doc.Document, result block.FetchBlockResult) error { + forEachCalls = append(forEachCalls, seriesMetadata) return nil }, nsCtx) require.Len(t, forEachCalls, 2) - assert.Equal(t, id0, forEachCalls[0]) - assert.Equal(t, id1, forEachCalls[1]) + assert.Equal(t, id0.Bytes(), forEachCalls[0].ID) + assert.Equal(t, id1.Bytes(), forEachCalls[1].ID) // Reset expected calls. forEachCalls = forEachCalls[:0] @@ -194,15 +197,14 @@ func TestForEachRemaining(t *testing.T) { require.NoError(t, err) assert.True(t, exists) assert.Equal(t, result.Blocks, res) - shard.EXPECT().TagsFromSeriesID(gomock.Any()).Return(ident.Tags{}, true, nil).Times(2) shard.EXPECT(). FetchBlocksForColdFlush(gomock.Any(), id2, xtime.UnixNano(1).ToTime(), version+1, gomock.Any()). Return(result, nil) shard.EXPECT(). FetchBlocksForColdFlush(gomock.Any(), id4, xtime.UnixNano(1).ToTime(), version+1, gomock.Any()). 
Return(result, nil) - err = mergeWith.ForEachRemaining(ctx, 1, func(seriesID ident.ID, tags ident.Tags, result block.FetchBlockResult) error { - forEachCalls = append(forEachCalls, seriesID) + err = mergeWith.ForEachRemaining(ctx, 1, func(seriesMetadata doc.Document, result block.FetchBlockResult) error { + forEachCalls = append(forEachCalls, seriesMetadata) return nil }, nsCtx) require.NoError(t, err) @@ -211,21 +213,17 @@ func TestForEachRemaining(t *testing.T) { assert.Equal(t, id4, forEachCalls[1]) // Test call with error getting tags. - shard.EXPECT(). - TagsFromSeriesID(gomock.Any()).Return(ident.Tags{}, false, errors.New("bad-tags")) shard.EXPECT(). FetchBlocksForColdFlush(gomock.Any(), id8, xtime.UnixNano(4).ToTime(), version+1, gomock.Any()). Return(result, nil) - err = mergeWith.ForEachRemaining(ctx, 4, func(seriesID ident.ID, tags ident.Tags, result block.FetchBlockResult) error { + err = mergeWith.ForEachRemaining(ctx, 4, func(seriesMetadata doc.Document, result block.FetchBlockResult) error { // This function won't be called with the above error. return errors.New("unreachable") }, nsCtx) assert.Error(t, err) // Test call with bad function execution. - shard.EXPECT(). - TagsFromSeriesID(gomock.Any()).Return(ident.Tags{}, true, nil) - err = mergeWith.ForEachRemaining(ctx, 4, func(seriesID ident.ID, tags ident.Tags, result block.FetchBlockResult) error { + err = mergeWith.ForEachRemaining(ctx, 4, func(seriesMetadata doc.Document, result block.FetchBlockResult) error { return errors.New("bad") }, nsCtx) assert.Error(t, err) @@ -242,7 +240,7 @@ func addDirtySeries( seriesList = newIDList(nil) dirtySeriesToWrite[start] = seriesList } - element := seriesList.PushBack(id) + element := seriesList.PushBack(doc.Document{ID: id.Bytes()}) - dirtySeries.Set(idAndBlockStart{blockStart: start, id: id}, element) + dirtySeries.Set(idAndBlockStart{blockStart: start, id: id.Bytes()}, element) } diff --git a/src/dbnode/storage/id_list_gen.go b/src/dbnode/storage/id_list_gen.go index e66102dace..4b786afaa6 100644 --- a/src/dbnode/storage/id_list_gen.go +++ b/src/dbnode/storage/id_list_gen.go @@ -25,7 +25,7 @@ package storage import ( - "github.com/m3db/m3/src/x/ident" + "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/x/pool" ) @@ -97,7 +97,7 @@ type idElement struct { list *idList // The value stored with this element. - Value ident.ID + Value doc.Document } // Next returns the next list element or nil. @@ -181,7 +181,7 @@ func (l *idList) insert(e, at *idElement) *idElement { } // insertValue is a convenience wrapper for inserting using the list's pool. -func (l *idList) insertValue(v ident.ID, at *idElement) *idElement { +func (l *idList) insertValue(v doc.Document, at *idElement) *idElement { e := l.Pool.get() e.Value = v return l.insert(e, at) @@ -201,7 +201,7 @@ func (l *idList) remove(e *idElement) *idElement { // Remove removes e from l if e is an element of list l. // It returns the element value e.Value. // The element must not be nil. -func (l *idList) Remove(e *idElement) ident.ID { +func (l *idList) Remove(e *idElement) doc.Document { if e.list == l { // if e.list == l, l must have been initialized when e was inserted // in l or l == nil (e is a zero Element) and l.remove will crash. @@ -212,13 +212,13 @@ func (l *idList) Remove(e *idElement) ident.ID { } // PushFront inserts a new element e with value v at the front of list l and returns e. 
-func (l *idList) PushFront(v ident.ID) *idElement { +func (l *idList) PushFront(v doc.Document) *idElement { l.lazyInit() return l.insertValue(v, &l.root) } // PushBack inserts a new element e with value v at the back of list l and returns e. -func (l *idList) PushBack(v ident.ID) *idElement { +func (l *idList) PushBack(v doc.Document) *idElement { l.lazyInit() return l.insertValue(v, l.root.prev) } @@ -226,7 +226,7 @@ func (l *idList) PushBack(v ident.ID) *idElement { // InsertBefore inserts a new element e with value v immediately before mark and returns e. // If mark is not an element of l, the list is not modified. // The mark must not be nil. -func (l *idList) InsertBefore(v ident.ID, mark *idElement) *idElement { +func (l *idList) InsertBefore(v doc.Document, mark *idElement) *idElement { if mark.list != l { return nil } @@ -237,7 +237,7 @@ func (l *idList) InsertBefore(v ident.ID, mark *idElement) *idElement { // InsertAfter inserts a new element e with value v immediately after mark and returns e. // If mark is not an element of l, the list is not modified. // The mark must not be nil. -func (l *idList) InsertAfter(v ident.ID, mark *idElement) *idElement { +func (l *idList) InsertAfter(v doc.Document, mark *idElement) *idElement { if mark.list != l { return nil } diff --git a/src/dbnode/storage/index.go b/src/dbnode/storage/index.go index b8730924fb..7b67b9a386 100644 --- a/src/dbnode/storage/index.go +++ b/src/dbnode/storage/index.go @@ -1062,7 +1062,7 @@ func (i *nsIndex) flushBlockSegment( } for _, result := range results.Results() { - doc, err := convert.FromMetricIter(result.ID, result.Tags) + doc, err := convert.FromSeriesIDAndTagIter(result.ID, result.Tags) if err != nil { return err } diff --git a/src/dbnode/storage/index/convert/convert.go b/src/dbnode/storage/index/convert/convert.go index bc4d58d21d..f85077a8f8 100644 --- a/src/dbnode/storage/index/convert/convert.go +++ b/src/dbnode/storage/index/convert/convert.go @@ -46,7 +46,7 @@ var ( "corrupt data, unable to extract id") ) -// ValidateSeries will validate a metric for use with m3ninx. +// ValidateSeries will validate a series for use with m3ninx. func ValidateSeries(id ident.ID, tags ident.Tags) error { if idBytes := id.Bytes(); !utf8.Valid(idBytes) { return fmt.Errorf("series has invalid ID: id=%s, id_hex=%x", @@ -79,17 +79,11 @@ func ValidateSeriesTag(tag ident.Tag) error { return nil } -// FromMetric converts the provided metric id+tags into a document. -// FOLLOWUP(r): Rename FromMetric to FromSeries (metric terminiology -// is not common in the codebase) -func FromMetric(id ident.ID, tags ident.Tags) (doc.Document, error) { +// FromSeriesIDAndTags converts the provided series id+tags into a document. +func FromSeriesIDAndTags(id ident.ID, tags ident.Tags) (doc.Document, error) { clonedID := clone(id) fields := make([]doc.Field, 0, len(tags.Values())) for _, tag := range tags.Values() { - if bytes.Equal(ReservedFieldNameID, tag.Name.Bytes()) { - return doc.Document{}, ErrUsingReservedFieldName - } - nameBytes, valueBytes := tag.Name.Bytes(), tag.Value.Bytes() var clonedName, clonedValue []byte @@ -109,42 +103,23 @@ func FromMetric(id ident.ID, tags ident.Tags) (doc.Document, error) { Value: clonedValue, }) } - return doc.Document{ + + d := doc.Document{ ID: clonedID, Fields: fields, - }, nil -} - -// FromMetricNoClone converts the provided metric id+tags into a document without cloning. 
-func FromMetricNoClone(id ident.ID, tags ident.Tags) (doc.Document, error) { - fields := make([]doc.Field, 0, len(tags.Values())) - for _, tag := range tags.Values() { - if bytes.Equal(ReservedFieldNameID, tag.Name.Bytes()) { - return doc.Document{}, ErrUsingReservedFieldName - } - fields = append(fields, doc.Field{ - Name: tag.Name.Bytes(), - Value: tag.Value.Bytes(), - }) } - return doc.Document{ - ID: id.Bytes(), - Fields: fields, - }, nil + if err := d.Validate(); err != nil { + return doc.Document{}, err + } + return d, nil } -// FromMetricIter converts the provided metric id+tags into a document. -// FOLLOWUP(r): Rename FromMetric to FromSeries (metric terminiology -// is not common in the codebase) -func FromMetricIter(id ident.ID, tags ident.TagIterator) (doc.Document, error) { +// FromSeriesIDAndTagIter converts the provided series id+tags into a document. +func FromSeriesIDAndTagIter(id ident.ID, tags ident.TagIterator) (doc.Document, error) { clonedID := clone(id) fields := make([]doc.Field, 0, tags.Remaining()) for tags.Next() { tag := tags.Current() - if bytes.Equal(ReservedFieldNameID, tag.Name.Bytes()) { - return doc.Document{}, ErrUsingReservedFieldName - } - nameBytes, valueBytes := tag.Name.Bytes(), tag.Value.Bytes() var clonedName, clonedValue []byte @@ -167,33 +142,15 @@ func FromMetricIter(id ident.ID, tags ident.TagIterator) (doc.Document, error) { if err := tags.Err(); err != nil { return doc.Document{}, err } - return doc.Document{ + + d := doc.Document{ ID: clonedID, Fields: fields, - }, nil -} - -// FromMetricIterNoClone converts the provided metric id+tags iterator into a -// document without cloning. -func FromMetricIterNoClone(id ident.ID, tags ident.TagIterator) (doc.Document, error) { - fields := make([]doc.Field, 0, tags.Remaining()) - for tags.Next() { - tag := tags.Current() - if bytes.Equal(ReservedFieldNameID, tag.Name.Bytes()) { - return doc.Document{}, ErrUsingReservedFieldName - } - fields = append(fields, doc.Field{ - Name: tag.Name.Bytes(), - Value: tag.Value.Bytes(), - }) } - if err := tags.Err(); err != nil { + if err := d.Validate(); err != nil { return doc.Document{}, err } - return doc.Document{ - ID: id.Bytes(), - Fields: fields, - }, nil + return d, nil } // TagsFromTagsIter returns an ident.Tags from a TagIterator. 
It also tries diff --git a/src/dbnode/storage/index/convert/convert_test.go b/src/dbnode/storage/index/convert/convert_test.go index afb848cd4b..1a04f77f3f 100644 --- a/src/dbnode/storage/index/convert/convert_test.go +++ b/src/dbnode/storage/index/convert/convert_test.go @@ -58,15 +58,6 @@ func TestFromMetricInvalid(t *testing.T) { assert.Error(t, err) } -func TestFromMetricNoCloneInvalid(t *testing.T) { - id := ident.StringID("foo") - tags := ident.NewTags( - ident.StringTag(string(convert.ReservedFieldNameID), "value"), - ) - _, err := convert.FromMetricNoClone(id, tags) - assert.Error(t, err) -} - func TestFromMetricIteratorInvalid(t *testing.T) { id := ident.StringID("foo") tags := ident.NewTags( @@ -89,19 +80,6 @@ func TestFromMetricValid(t *testing.T) { assert.Equal(t, "baz", string(d.Fields[0].Value)) } -func TestFromMetricNoCloneValid(t *testing.T) { - id := ident.StringID("foo") - tags := ident.NewTags( - ident.StringTag("bar", "baz"), - ) - d, err := convert.FromMetricNoClone(id, tags) - assert.NoError(t, err) - assert.Equal(t, "foo", string(d.ID)) - assert.Len(t, d.Fields, 1) - assert.Equal(t, "bar", string(d.Fields[0].Name)) - assert.Equal(t, "baz", string(d.Fields[0].Value)) -} - func TestFromMetricIterValid(t *testing.T) { id := ident.StringID("foo") tags := ident.NewTags( diff --git a/src/dbnode/storage/index_query_concurrent_test.go b/src/dbnode/storage/index_query_concurrent_test.go index a23b7f988a..07bb9eac75 100644 --- a/src/dbnode/storage/index_query_concurrent_test.go +++ b/src/dbnode/storage/index_query_concurrent_test.go @@ -347,7 +347,7 @@ func testNamespaceIndexHighConcurrentQueries( for _, entry := range results.Results.Map().Iter() { id := entry.Key().String() - doc, err := convert.FromMetricIterNoClone(entry.Key(), entry.Value()) + doc, err := convert.FromMetricIter(entry.Key(), entry.Value()) require.NoError(t, err) if err != nil { continue // this will fail the test anyway, but don't want to panic diff --git a/src/dbnode/storage/namespace.go b/src/dbnode/storage/namespace.go index 46764245fc..61c6fa9ecb 100644 --- a/src/dbnode/storage/namespace.go +++ b/src/dbnode/storage/namespace.go @@ -1093,7 +1093,7 @@ func (n *dbNamespace) WarmFlush( // idAndBlockStart is the composite key for the genny map used to keep track of // dirty series that need to be ColdFlushed. 
type idAndBlockStart struct { - id ident.ID + id []byte blockStart xtime.UnixNano } diff --git a/src/dbnode/storage/series/buffer.go b/src/dbnode/storage/series/buffer.go index 8b41fb03b3..876a99e2d2 100644 --- a/src/dbnode/storage/series/buffer.go +++ b/src/dbnode/storage/series/buffer.go @@ -77,6 +77,7 @@ const ( type databaseBuffer interface { Write( ctx context.Context, + id ident.ID, timestamp time.Time, value float64, unit xtime.Unit, @@ -87,8 +88,7 @@ type databaseBuffer interface { Snapshot( ctx context.Context, blockStart time.Time, - id ident.ID, - tags ident.Tags, + metadata persist.Metadata, persistFn persist.DataFn, nsCtx namespace.Context, ) error @@ -96,8 +96,7 @@ type databaseBuffer interface { WarmFlush( ctx context.Context, blockStart time.Time, - id ident.ID, - tags ident.Tags, + metadata persist.Metadata, persistFn persist.DataFn, nsCtx namespace.Context, ) (FlushOutcome, error) @@ -141,7 +140,6 @@ type databaseBuffer interface { } type databaseBufferResetOptions struct { - ID ident.ID BlockRetriever QueryableBlockRetriever Options Options } @@ -214,7 +212,6 @@ func (t *OptimizedTimes) ForEach(fn func(t xtime.UnixNano)) { } type dbBuffer struct { - id ident.ID opts Options nowFn clock.NowFn @@ -243,7 +240,6 @@ func newDatabaseBuffer() databaseBuffer { } func (b *dbBuffer) Reset(opts databaseBufferResetOptions) { - b.id = opts.ID b.opts = opts.Options b.nowFn = opts.Options.ClockOptions().NowFn() b.bucketPool = opts.Options.BufferBucketPool() @@ -253,6 +249,7 @@ func (b *dbBuffer) Reset(opts databaseBufferResetOptions) { func (b *dbBuffer) Write( ctx context.Context, + id ident.ID, timestamp time.Time, value float64, unit xtime.Unit, @@ -293,7 +290,7 @@ func (b *dbBuffer) Write( fmt.Errorf("datapoint too far in past: "+ "id=%s, off_by=%s, timestamp=%s, past_limit=%s, "+ "timestamp_unix_nanos=%d, past_limit_unix_nanos=%d", - b.id.Bytes(), pastLimit.Sub(timestamp).String(), + id.Bytes(), pastLimit.Sub(timestamp).String(), timestamp.Format(errTimestampFormat), pastLimit.Format(errTimestampFormat), timestamp.UnixNano(), pastLimit.UnixNano())) @@ -306,7 +303,7 @@ func (b *dbBuffer) Write( fmt.Errorf("datapoint too far in future: "+ "id=%s, off_by=%s, timestamp=%s, future_limit=%s, "+ "timestamp_unix_nanos=%d, future_limit_unix_nanos=%d", - b.id.Bytes(), timestamp.Sub(futureLimit).String(), + id.Bytes(), timestamp.Sub(futureLimit).String(), timestamp.Format(errTimestampFormat), futureLimit.Format(errTimestampFormat), timestamp.UnixNano(), futureLimit.UnixNano())) @@ -334,7 +331,7 @@ func (b *dbBuffer) Write( fmt.Errorf("datapoint too far in past and out of retention: "+ "id=%s, off_by=%s, timestamp=%s, retention_past_limit=%s, "+ "timestamp_unix_nanos=%d, retention_past_limit_unix_nanos=%d", - b.id.Bytes(), retentionLimit.Sub(timestamp).String(), + id.Bytes(), retentionLimit.Sub(timestamp).String(), timestamp.Format(errTimestampFormat), retentionLimit.Format(errTimestampFormat), timestamp.UnixNano(), retentionLimit.UnixNano())) @@ -351,7 +348,7 @@ func (b *dbBuffer) Write( fmt.Errorf("datapoint too far in future and out of retention: "+ "id=%s, off_by=%s, timestamp=%s, retention_future_limit=%s, "+ "timestamp_unix_nanos=%d, retention_future_limit_unix_nanos=%d", - b.id.Bytes(), timestamp.Sub(futureRetentionLimit).String(), + id.Bytes(), timestamp.Sub(futureRetentionLimit).String(), timestamp.Format(errTimestampFormat), futureRetentionLimit.Format(errTimestampFormat), timestamp.UnixNano(), futureRetentionLimit.UnixNano())) @@ -491,8 +488,7 @@ func (b *dbBuffer) Load(bl 
block.DatabaseBlock, writeType WriteType) { func (b *dbBuffer) Snapshot( ctx context.Context, blockStart time.Time, - id ident.ID, - tags ident.Tags, + metadata persist.Metadata, persistFn persist.DataFn, nsCtx namespace.Context, ) error { @@ -560,14 +556,13 @@ func (b *dbBuffer) Snapshot( } checksum := segment.CalculateChecksum() - return persistFn(id, tags, segment, checksum) + return persistFn(metadata, segment, checksum) } func (b *dbBuffer) WarmFlush( ctx context.Context, blockStart time.Time, - id ident.ID, - tags ident.Tags, + metadata persist.Metadata, persistFn persist.DataFn, nsCtx namespace.Context, ) (FlushOutcome, error) { @@ -621,7 +616,7 @@ func (b *dbBuffer) WarmFlush( } checksum := segment.CalculateChecksum() - err = persistFn(id, tags, segment, checksum) + err = persistFn(metadata, segment, checksum) if err != nil { return FlushOutcomeErr, err } diff --git a/src/dbnode/storage/series/buffer_mock.go b/src/dbnode/storage/series/buffer_mock.go index 79ea909a63..55afef6d27 100644 --- a/src/dbnode/storage/series/buffer_mock.go +++ b/src/dbnode/storage/series/buffer_mock.go @@ -1,42 +1,20 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/m3db/m3/src/dbnode/storage/series/buffer.go - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Source: /Users/r/go/src/github.com/m3db/m3/src/dbnode/storage/series/buffer.go // Package series is a generated GoMock package. 
package series import ( - "reflect" - "time" - - "github.com/m3db/m3/src/dbnode/namespace" - "github.com/m3db/m3/src/dbnode/persist" - "github.com/m3db/m3/src/dbnode/storage/block" - "github.com/m3db/m3/src/dbnode/x/xio" - "github.com/m3db/m3/src/x/context" - "github.com/m3db/m3/src/x/ident" - time0 "github.com/m3db/m3/src/x/time" - - "github.com/golang/mock/gomock" + gomock "github.com/golang/mock/gomock" + namespace "github.com/m3db/m3/src/dbnode/namespace" + persist "github.com/m3db/m3/src/dbnode/persist" + block "github.com/m3db/m3/src/dbnode/storage/block" + xio "github.com/m3db/m3/src/dbnode/x/xio" + context "github.com/m3db/m3/src/x/context" + ident "github.com/m3db/m3/src/x/ident" + time "github.com/m3db/m3/src/x/time" + reflect "reflect" + time0 "time" ) // MockdatabaseBuffer is a mock of databaseBuffer interface @@ -63,9 +41,9 @@ func (m *MockdatabaseBuffer) EXPECT() *MockdatabaseBufferMockRecorder { } // Write mocks base method -func (m *MockdatabaseBuffer) Write(ctx context.Context, timestamp time.Time, value float64, unit time0.Unit, annotation []byte, wOpts WriteOptions) (bool, WriteType, error) { +func (m *MockdatabaseBuffer) Write(ctx context.Context, id ident.ID, timestamp time0.Time, value float64, unit time.Unit, annotation []byte, wOpts WriteOptions) (bool, WriteType, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Write", ctx, timestamp, value, unit, annotation, wOpts) + ret := m.ctrl.Call(m, "Write", ctx, id, timestamp, value, unit, annotation, wOpts) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(WriteType) ret2, _ := ret[2].(error) @@ -73,42 +51,42 @@ func (m *MockdatabaseBuffer) Write(ctx context.Context, timestamp time.Time, val } // Write indicates an expected call of Write -func (mr *MockdatabaseBufferMockRecorder) Write(ctx, timestamp, value, unit, annotation, wOpts interface{}) *gomock.Call { +func (mr *MockdatabaseBufferMockRecorder) Write(ctx, id, timestamp, value, unit, annotation, wOpts interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockdatabaseBuffer)(nil).Write), ctx, timestamp, value, unit, annotation, wOpts) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockdatabaseBuffer)(nil).Write), ctx, id, timestamp, value, unit, annotation, wOpts) } // Snapshot mocks base method -func (m *MockdatabaseBuffer) Snapshot(ctx context.Context, blockStart time.Time, id ident.ID, tags ident.Tags, persistFn persist.DataFn, nsCtx namespace.Context) error { +func (m *MockdatabaseBuffer) Snapshot(ctx context.Context, blockStart time0.Time, metadata persist.Metadata, persistFn persist.DataFn, nsCtx namespace.Context) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Snapshot", ctx, blockStart, id, tags, persistFn, nsCtx) + ret := m.ctrl.Call(m, "Snapshot", ctx, blockStart, metadata, persistFn, nsCtx) ret0, _ := ret[0].(error) return ret0 } // Snapshot indicates an expected call of Snapshot -func (mr *MockdatabaseBufferMockRecorder) Snapshot(ctx, blockStart, id, tags, persistFn, nsCtx interface{}) *gomock.Call { +func (mr *MockdatabaseBufferMockRecorder) Snapshot(ctx, blockStart, metadata, persistFn, nsCtx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Snapshot", reflect.TypeOf((*MockdatabaseBuffer)(nil).Snapshot), ctx, blockStart, id, tags, persistFn, nsCtx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Snapshot", reflect.TypeOf((*MockdatabaseBuffer)(nil).Snapshot), ctx, 
blockStart, metadata, persistFn, nsCtx) } // WarmFlush mocks base method -func (m *MockdatabaseBuffer) WarmFlush(ctx context.Context, blockStart time.Time, id ident.ID, tags ident.Tags, persistFn persist.DataFn, nsCtx namespace.Context) (FlushOutcome, error) { +func (m *MockdatabaseBuffer) WarmFlush(ctx context.Context, blockStart time0.Time, metadata persist.Metadata, persistFn persist.DataFn, nsCtx namespace.Context) (FlushOutcome, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WarmFlush", ctx, blockStart, id, tags, persistFn, nsCtx) + ret := m.ctrl.Call(m, "WarmFlush", ctx, blockStart, metadata, persistFn, nsCtx) ret0, _ := ret[0].(FlushOutcome) ret1, _ := ret[1].(error) return ret0, ret1 } // WarmFlush indicates an expected call of WarmFlush -func (mr *MockdatabaseBufferMockRecorder) WarmFlush(ctx, blockStart, id, tags, persistFn, nsCtx interface{}) *gomock.Call { +func (mr *MockdatabaseBufferMockRecorder) WarmFlush(ctx, blockStart, metadata, persistFn, nsCtx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WarmFlush", reflect.TypeOf((*MockdatabaseBuffer)(nil).WarmFlush), ctx, blockStart, id, tags, persistFn, nsCtx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WarmFlush", reflect.TypeOf((*MockdatabaseBuffer)(nil).WarmFlush), ctx, blockStart, metadata, persistFn, nsCtx) } // ReadEncoded mocks base method -func (m *MockdatabaseBuffer) ReadEncoded(ctx context.Context, start, end time.Time, nsCtx namespace.Context) ([][]xio.BlockReader, error) { +func (m *MockdatabaseBuffer) ReadEncoded(ctx context.Context, start, end time0.Time, nsCtx namespace.Context) ([][]xio.BlockReader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadEncoded", ctx, start, end, nsCtx) ret0, _ := ret[0].([][]xio.BlockReader) @@ -123,7 +101,7 @@ func (mr *MockdatabaseBufferMockRecorder) ReadEncoded(ctx, start, end, nsCtx int } // FetchBlocksForColdFlush mocks base method -func (m *MockdatabaseBuffer) FetchBlocksForColdFlush(ctx context.Context, start time.Time, version int, nsCtx namespace.Context) (block.FetchBlockResult, error) { +func (m *MockdatabaseBuffer) FetchBlocksForColdFlush(ctx context.Context, start time0.Time, version int, nsCtx namespace.Context) (block.FetchBlockResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocksForColdFlush", ctx, start, version, nsCtx) ret0, _ := ret[0].(block.FetchBlockResult) @@ -138,7 +116,7 @@ func (mr *MockdatabaseBufferMockRecorder) FetchBlocksForColdFlush(ctx, start, ve } // FetchBlocks mocks base method -func (m *MockdatabaseBuffer) FetchBlocks(ctx context.Context, starts []time.Time, nsCtx namespace.Context) []block.FetchBlockResult { +func (m *MockdatabaseBuffer) FetchBlocks(ctx context.Context, starts []time0.Time, nsCtx namespace.Context) []block.FetchBlockResult { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocks", ctx, starts, nsCtx) ret0, _ := ret[0].([]block.FetchBlockResult) @@ -152,7 +130,7 @@ func (mr *MockdatabaseBufferMockRecorder) FetchBlocks(ctx, starts, nsCtx interfa } // FetchBlocksMetadata mocks base method -func (m *MockdatabaseBuffer) FetchBlocksMetadata(ctx context.Context, start, end time.Time, opts FetchBlocksMetadataOptions) (block.FetchBlockMetadataResults, error) { +func (m *MockdatabaseBuffer) FetchBlocksMetadata(ctx context.Context, start, end time0.Time, opts FetchBlocksMetadataOptions) (block.FetchBlockMetadataResults, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocksMetadata", ctx, start, end, opts) ret0, _ := 
ret[0].(block.FetchBlockMetadataResults) @@ -181,7 +159,7 @@ func (mr *MockdatabaseBufferMockRecorder) IsEmpty() *gomock.Call { } // ColdFlushBlockStarts mocks base method -func (m *MockdatabaseBuffer) ColdFlushBlockStarts(blockStates map[time0.UnixNano]BlockState) OptimizedTimes { +func (m *MockdatabaseBuffer) ColdFlushBlockStarts(blockStates map[time.UnixNano]BlockState) OptimizedTimes { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ColdFlushBlockStarts", blockStates) ret0, _ := ret[0].(OptimizedTimes) diff --git a/src/dbnode/storage/series/buffer_test.go b/src/dbnode/storage/series/buffer_test.go index ac90d0a9b8..3d4bf75ca0 100644 --- a/src/dbnode/storage/series/buffer_test.go +++ b/src/dbnode/storage/series/buffer_test.go @@ -29,9 +29,11 @@ import ( "github.com/m3db/m3/src/dbnode/encoding" "github.com/m3db/m3/src/dbnode/encoding/m3tsz" + "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/storage/block" "github.com/m3db/m3/src/dbnode/ts" "github.com/m3db/m3/src/dbnode/x/xio" + "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/x/checked" "github.com/m3db/m3/src/x/context" xerrors "github.com/m3db/m3/src/x/errors" @@ -44,6 +46,10 @@ import ( "github.com/stretchr/testify/require" ) +var ( + testID = ident.StringID("foo") +) + func newBufferTestOptions() Options { encoderPool := encoding.NewEncoderPool(nil) multiReaderIteratorPool := encoding.NewMultiReaderIteratorPool(nil) @@ -79,15 +85,17 @@ func newBufferTestOptions() Options { // Writes to buffer, verifying no error and that further writes should happen. func verifyWriteToBufferSuccess( t *testing.T, + id ident.ID, buffer databaseBuffer, v DecodedTestValue, schema namespace.SchemaDescr, ) { - verifyWriteToBuffer(t, buffer, v, schema, true, false) + verifyWriteToBuffer(t, id, buffer, v, schema, true, false) } func verifyWriteToBuffer( t *testing.T, + id ident.ID, buffer databaseBuffer, v DecodedTestValue, schema namespace.SchemaDescr, @@ -97,7 +105,7 @@ func verifyWriteToBuffer( ctx := context.NewContext() defer ctx.Close() - wasWritten, _, err := buffer.Write(ctx, v.Timestamp, v.Value, v.Unit, + wasWritten, _, err := buffer.Write(ctx, id, v.Timestamp, v.Value, v.Unit, v.Annotation, WriteOptions{SchemaDesc: schema}) if expectErr { @@ -117,13 +125,12 @@ func TestBufferWriteTooFuture(t *testing.T) { })) buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) ctx := context.NewContext() defer ctx.Close() - wasWritten, _, err := buffer.Write(ctx, curr.Add(rops.BufferFuture()), 1, + wasWritten, _, err := buffer.Write(ctx, testID, curr.Add(rops.BufferFuture()), 1, xtime.Second, nil, WriteOptions{}) assert.False(t, wasWritten) assert.Error(t, err) @@ -143,14 +150,14 @@ func TestBufferWriteTooPast(t *testing.T) { })) buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) ctx := context.NewContext() defer ctx.Close() // Writes are inclusive on buffer past start border. Must be before that inclusive border to // be a cold write. To test this we write a second further into the past. 
- wasWritten, _, err := buffer.Write(ctx, curr.Add(-1*rops.BufferPast()-time.Second), 1, xtime.Second, + wasWritten, _, err := buffer.Write(ctx, testID, + curr.Add(-1*rops.BufferPast()-time.Second), 1, xtime.Second, nil, WriteOptions{}) assert.False(t, wasWritten) assert.Error(t, err) @@ -177,7 +184,6 @@ func TestBufferWriteColdTooFutureRetention(t *testing.T) { })) buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) ctx := context.NewContext() @@ -185,8 +191,8 @@ func TestBufferWriteColdTooFutureRetention(t *testing.T) { futureRetention := time.Second + maxDuration(rops.BufferFuture(), rops.FutureRetentionPeriod()) - wasWritten, _, err := buffer.Write(ctx, curr.Add(futureRetention), 1, - xtime.Second, nil, WriteOptions{}) + wasWritten, _, err := buffer.Write(ctx, + testID, curr.Add(futureRetention), 1, xtime.Second, nil, WriteOptions{}) assert.False(t, wasWritten) assert.Error(t, err) assert.True(t, xerrors.IsInvalidParams(err)) @@ -205,7 +211,6 @@ func TestBufferWriteColdTooPastRetention(t *testing.T) { })) buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) ctx := context.NewContext() @@ -213,7 +218,8 @@ func TestBufferWriteColdTooPastRetention(t *testing.T) { pastRetention := time.Second + maxDuration(rops.BufferPast(), rops.RetentionPeriod()) - wasWritten, _, err := buffer.Write(ctx, curr.Add(-pastRetention), 1, xtime.Second, + wasWritten, _, err := buffer.Write(ctx, testID, + curr.Add(-pastRetention), 1, xtime.Second, nil, WriteOptions{}) assert.False(t, wasWritten) assert.Error(t, err) @@ -236,13 +242,13 @@ func TestBufferWriteError(t *testing.T) { return curr })) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) defer ctx.Close() timeUnitNotExist := xtime.Unit(127) - wasWritten, _, err := buffer.Write(ctx, curr, 1, timeUnitNotExist, nil, WriteOptions{}) + wasWritten, _, err := buffer.Write(ctx, testID, + curr, 1, timeUnitNotExist, nil, WriteOptions{}) require.False(t, wasWritten) require.Error(t, err) } @@ -260,7 +266,6 @@ func testBufferWriteRead(t *testing.T, opts Options, setAnn setAnnotation) { })) buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) @@ -276,7 +281,7 @@ func testBufferWriteRead(t *testing.T, opts Options, setAnn setAnnotation) { } for _, v := range data { - verifyWriteToBufferSuccess(t, buffer, v, nsCtx.Schema) + verifyWriteToBufferSuccess(t, testID, buffer, v, nsCtx.Schema) } ctx := context.NewContext() @@ -299,7 +304,6 @@ func TestBufferReadOnlyMatchingBuckets(t *testing.T) { })) buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) @@ -310,7 +314,7 @@ func TestBufferReadOnlyMatchingBuckets(t *testing.T) { for _, v := range data { curr = v.Timestamp - verifyWriteToBufferSuccess(t, buffer, v, nil) + verifyWriteToBufferSuccess(t, testID, buffer, v, nil) } ctx := context.NewContext() @@ -342,7 +346,6 @@ func TestBufferWriteOutOfOrder(t *testing.T) { })) buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) @@ -356,7 +359,7 @@ func TestBufferWriteOutOfOrder(t *testing.T) { if v.Timestamp.After(curr) { curr = v.Timestamp } - verifyWriteToBufferSuccess(t, buffer, v, nil) + verifyWriteToBufferSuccess(t, testID, buffer, v, nil) } buckets, ok := 
buffer.bucketVersionsAt(start) @@ -488,7 +491,6 @@ func newTestBufferWithCustomData( ) (*dbBuffer, map[xtime.UnixNano][]DecodedTestValue) { buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) expectedMap := make(map[xtime.UnixNano][]DecodedTestValue) @@ -714,7 +716,6 @@ func TestIndexedBufferWriteOnlyWritesSinglePoint(t *testing.T) { })) buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) @@ -734,7 +735,8 @@ func TestIndexedBufferWriteOnlyWritesSinglePoint(t *testing.T) { ForceValue: forceValue, }, } - wasWritten, _, err := buffer.Write(ctx, v.Timestamp, v.Value, v.Unit, + wasWritten, _, err := buffer.Write(ctx, testID, + v.Timestamp, v.Value, v.Unit, v.Annotation, writeOpts) require.NoError(t, err) expectedWrite := i == 0 @@ -768,7 +770,6 @@ func testBufferFetchBlocks(t *testing.T, opts Options, setAnn setAnnotation) { buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) buffer.bucketsMap[xtime.ToUnixNano(b.start)] = b @@ -853,7 +854,6 @@ func TestBufferFetchBlocksOneResultPerBlock(t *testing.T) { buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) buffer.bucketsMap[xtime.ToUnixNano(b.start)] = b @@ -880,7 +880,6 @@ func TestBufferFetchBlocksMetadata(t *testing.T) { buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) buffer.bucketsMap[xtime.ToUnixNano(b.start)] = b @@ -931,7 +930,6 @@ func TestBufferTickReordersOutOfOrderBuffers(t *testing.T) { })) buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) @@ -948,7 +946,7 @@ func TestBufferTickReordersOutOfOrderBuffers(t *testing.T) { for _, v := range data { curr = v.Timestamp - verifyWriteToBufferSuccess(t, buffer, v, nil) + verifyWriteToBufferSuccess(t, testID, buffer, v, nil) } var encoders []encoding.Encoder @@ -1022,7 +1020,6 @@ func TestBufferRemoveBucket(t *testing.T) { })) buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) @@ -1038,7 +1035,7 @@ func TestBufferRemoveBucket(t *testing.T) { for _, v := range data { curr = v.Timestamp - verifyWriteToBufferSuccess(t, buffer, v, nil) + verifyWriteToBufferSuccess(t, testID, buffer, v, nil) } buckets, exists := buffer.bucketVersionsAt(start) @@ -1120,7 +1117,6 @@ func testBufferWithEmptyEncoder(t *testing.T, testSnapshot bool) { return curr })) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) @@ -1128,7 +1124,8 @@ func testBufferWithEmptyEncoder(t *testing.T, testSnapshot bool) { ctx := context.NewContext() defer ctx.Close() - wasWritten, _, err := buffer.Write(ctx, curr, 1, xtime.Second, nil, WriteOptions{}) + wasWritten, _, err := buffer.Write(ctx, testID, + curr, 1, xtime.Second, nil, WriteOptions{}) require.NoError(t, err) require.True(t, wasWritten) @@ -1152,21 +1149,26 @@ func testBufferWithEmptyEncoder(t *testing.T, testSnapshot bool) { } require.Equal(t, 1, len(encoders)) - assertPersistDataFn := func(id ident.ID, tags ident.Tags, segment ts.Segment, checlsum uint32) error { + assertPersistDataFn := func(persist.Metadata, ts.Segment, uint32) error { t.Fatal("persist fn should not have been called") return nil } 
+ metadata := persist.NewMetadata(doc.Document{ + ID: []byte("some-id"), + }) + if testSnapshot { ctx = context.NewContext() defer ctx.Close() - err = buffer.Snapshot(ctx, start, ident.StringID("some-id"), ident.Tags{}, assertPersistDataFn, namespace.Context{}) + + err = buffer.Snapshot(ctx, start, metadata, assertPersistDataFn, namespace.Context{}) assert.NoError(t, err) } else { ctx = context.NewContext() defer ctx.Close() _, err = buffer.WarmFlush( - ctx, start, ident.StringID("some-id"), ident.Tags{}, assertPersistDataFn, namespace.Context{}) + ctx, start, metadata, assertPersistDataFn, namespace.Context{}) require.NoError(t, err) } } @@ -1194,7 +1196,6 @@ func testBufferSnapshot(t *testing.T, opts Options, setAnn setAnnotation) { defer ctx.Close() buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) @@ -1220,7 +1221,7 @@ func testBufferSnapshot(t *testing.T, opts Options, setAnn setAnnotation) { // Perform the writes. for _, v := range data { curr = v.Timestamp - verifyWriteToBufferSuccess(t, buffer, v, nsCtx.Schema) + verifyWriteToBufferSuccess(t, testID, buffer, v, nsCtx.Schema) } // Verify internal state. @@ -1242,7 +1243,7 @@ func testBufferSnapshot(t *testing.T, opts Options, setAnn setAnnotation) { assert.Equal(t, 2, len(encoders)) - assertPersistDataFn := func(id ident.ID, tags ident.Tags, segment ts.Segment, checlsum uint32) error { + assertPersistDataFn := func(metadata persist.Metadata, segment ts.Segment, checlsum uint32) error { // Check we got the right results. expectedData := data[:len(data)-1] // -1 because we don't expect the last datapoint. expectedCopy := make([]DecodedTestValue, len(expectedData)) @@ -1259,7 +1260,11 @@ func testBufferSnapshot(t *testing.T, opts Options, setAnn setAnnotation) { } // Perform a snapshot. - err := buffer.Snapshot(ctx, start, ident.StringID("some-id"), ident.Tags{}, assertPersistDataFn, nsCtx) + metadata := persist.NewMetadata(doc.Document{ + ID: []byte("some-id"), + }) + + err := buffer.Snapshot(ctx, start, metadata, assertPersistDataFn, nsCtx) assert.NoError(t, err) // Check internal state to make sure the merge happened and was persisted. @@ -1297,7 +1302,6 @@ func TestBufferSnapshotWithColdWrites(t *testing.T) { return curr })) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) @@ -1320,7 +1324,7 @@ func TestBufferSnapshotWithColdWrites(t *testing.T) { for _, v := range warmData { // Set curr so that every write is a warm write. curr = v.Timestamp - verifyWriteToBufferSuccess(t, buffer, v, nsCtx.Schema) + verifyWriteToBufferSuccess(t, testID, buffer, v, nsCtx.Schema) } // Also add cold writes to the buffer to verify that Snapshot will capture @@ -1343,7 +1347,7 @@ func TestBufferSnapshotWithColdWrites(t *testing.T) { // Perform cold writes. for _, v := range coldData { - verifyWriteToBufferSuccess(t, buffer, v, nsCtx.Schema) + verifyWriteToBufferSuccess(t, testID, buffer, v, nsCtx.Schema) } // Verify internal state. @@ -1383,7 +1387,7 @@ func TestBufferSnapshotWithColdWrites(t *testing.T) { } assert.Equal(t, 2, len(coldEncoders)) - assertPersistDataFn := func(id ident.ID, tags ident.Tags, segment ts.Segment, checlsum uint32) error { + assertPersistDataFn := func(metadata persist.Metadata, segment ts.Segment, checlsum uint32) error { // Check we got the right results. // `len(warmData)-1` because we don't expect the last warm datapoint // since it's for a different block. 
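Aside (not part of the patch): a minimal sketch of how callers work with the reworked persist flow that the buffer and shard tests above exercise. It is based only on the constructors and signatures visible in this diff — persist.NewMetadata, persist.NewMetadataFromIDAndTags, and the persist.DataFn shape func(persist.Metadata, ts.Segment, uint32) error; the package name and helper functions are illustrative placeholders.

package example

import (
	"github.com/m3db/m3/src/dbnode/persist"
	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/m3ninx/doc"
	"github.com/m3db/m3/src/x/ident"
)

// metadataFromDocument builds persist.Metadata straight from an index
// document, as the buffer snapshot/flush tests now do.
func metadataFromDocument() persist.Metadata {
	return persist.NewMetadata(doc.Document{
		ID: []byte("some-id"),
	})
}

// metadataFromIDAndTags builds persist.Metadata from a legacy id+tags pair,
// as the shard fetch-blocks-metadata test now does.
func metadataFromIDAndTags(id ident.ID, tags ident.Tags) persist.Metadata {
	return persist.NewMetadataFromIDAndTags(id, tags, persist.MetadataOptions{})
}

// capturePersistFn returns a persist.DataFn that simply records the metadata
// it is handed, mirroring the assertion-style persist fns in the tests above.
// (The exact DataFn type definition is assumed from the signatures in this diff.)
func capturePersistFn(captured *[]persist.Metadata) persist.DataFn {
	return func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
		*captured = append(*captured, metadata)
		return nil
	}
}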
@@ -1403,7 +1407,11 @@ func TestBufferSnapshotWithColdWrites(t *testing.T) { } // Perform a snapshot. - err := buffer.Snapshot(ctx, start, ident.StringID("some-id"), ident.Tags{}, assertPersistDataFn, nsCtx) + metadata := persist.NewMetadata(doc.Document{ + ID: []byte("some-id"), + }) + + err := buffer.Snapshot(ctx, start, metadata, assertPersistDataFn, nsCtx) require.NoError(t, err) // Check internal state of warm bucket to make sure the merge happened and @@ -1741,7 +1749,7 @@ func TestFetchBlocksForColdFlush(t *testing.T) { assert.NoError(t, err) requireReaderValuesEqual(t, []DecodedTestValue{}, [][]xio.BlockReader{result.Blocks}, opts, nsCtx) assert.Equal(t, time.Time{}, result.FirstWrite) - wasWritten, _, err := buffer.Write(ctx, blockStart2, 1, + wasWritten, _, err := buffer.Write(ctx, testID, blockStart2, 1, xtime.Second, nil, WriteOptions{}) assert.True(t, wasWritten) result, err = buffer.FetchBlocksForColdFlush(ctx, blockStart2, 1, nsCtx) @@ -1773,7 +1781,6 @@ func TestBufferLoadWarmWrite(t *testing.T) { nsCtx = namespace.Context{} ) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) encoded, err := buffer.ReadEncoded(context.NewContext(), curr, curr.Add(blockSize), nsCtx) @@ -1807,7 +1814,6 @@ func TestBufferLoadColdWrite(t *testing.T) { nsCtx = namespace.Context{} ) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) encoded, err := buffer.ReadEncoded(context.NewContext(), curr, curr.Add(blockSize), nsCtx) @@ -1930,12 +1936,11 @@ func TestUpsertProto(t *testing.T) { t.Run(test.desc, func(t *testing.T) { buffer := newDatabaseBuffer().(*dbBuffer) buffer.Reset(databaseBufferResetOptions{ - ID: ident.StringID("foo"), Options: opts, }) for _, write := range test.writes { - verifyWriteToBuffer(t, buffer, write.data, nsCtx.Schema, + verifyWriteToBuffer(t, testID, buffer, write.data, nsCtx.Schema, write.expectWritten, write.expectErr) } diff --git a/src/dbnode/storage/series/lookup/lookup_mock.go b/src/dbnode/storage/series/lookup/lookup_mock.go index ebcdf5f5a2..9f8ac4e03c 100644 --- a/src/dbnode/storage/series/lookup/lookup_mock.go +++ b/src/dbnode/storage/series/lookup/lookup_mock.go @@ -1,26 +1,6 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/m3db/m3/src/dbnode/storage/series/lookup (interfaces: OnReleaseReadWriteRef) -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Package lookup is a generated GoMock package. 
package lookup diff --git a/src/dbnode/storage/series/series.go b/src/dbnode/storage/series/series.go index b25f6c91c9..d56dd20063 100644 --- a/src/dbnode/storage/series/series.go +++ b/src/dbnode/storage/series/series.go @@ -30,6 +30,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/block" "github.com/m3db/m3/src/dbnode/ts" "github.com/m3db/m3/src/dbnode/x/xio" + "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" "github.com/m3db/m3/src/x/instrument" @@ -60,11 +61,11 @@ type dbSeries struct { opts Options // NB(r): One should audit all places that access the - // series ID before changing ownership semantics (e.g. + // series metadata before changing ownership semantics (e.g. // pooling the ID rather than releasing it to the GC on // calling series.Reset()). id ident.ID - tags ident.Tags + metadata doc.Document uniqueIndex uint64 buffer databaseBuffer @@ -111,11 +112,11 @@ func (s *dbSeries) ID() ident.ID { return id } -func (s *dbSeries) Tags() ident.Tags { +func (s *dbSeries) Metadata() doc.Document { s.RLock() - tags := s.tags + metadata := s.metadata s.RUnlock() - return tags + return metadata } func (s *dbSeries) UniqueIndex() uint64 { @@ -307,8 +308,8 @@ func (s *dbSeries) Write( } } - wasWritten, writeType, err := s.buffer.Write(ctx, timestamp, value, unit, annotation, wOpts) - return wasWritten, writeType, err + return s.buffer.Write(ctx, s.id, timestamp, value, + unit, annotation, wOpts) } func (s *dbSeries) ReadEncoded( @@ -380,7 +381,7 @@ func (s *dbSeries) FetchBlocksMetadata( // NB(r): Since ID and Tags are garbage collected we can safely // return refs. tagsIter := s.opts.IdentifierPool().TagsIterator() - tagsIter.Reset(s.tags) + tagsIter.ResetFields(s.metadata.Fields) return block.NewFetchBlocksMetadataResult(s.id, tagsIter, res), nil } @@ -529,7 +530,8 @@ func (s *dbSeries) WarmFlush( // Need a write lock because the buffer WarmFlush method mutates // state (by performing a pro-active merge). s.Lock() - outcome, err := s.buffer.WarmFlush(ctx, blockStart, s.id, s.tags, persistFn, nsCtx) + outcome, err := s.buffer.WarmFlush(ctx, blockStart, + persist.NewMetadata(s.metadata), persistFn, nsCtx) s.Unlock() return outcome, err } @@ -543,9 +545,10 @@ func (s *dbSeries) Snapshot( // Need a write lock because the buffer Snapshot method mutates // state (by performing a pro-active merge). s.Lock() - defer s.Unlock() - - return s.buffer.Snapshot(ctx, blockStart, s.id, s.tags, persistFn, nsCtx) + err := s.buffer.Snapshot(ctx, blockStart, + persist.NewMetadata(s.metadata), persistFn, nsCtx) + s.Unlock() + return err } func (s *dbSeries) ColdFlushBlockStarts(blockStates BootstrappedBlockStateSnapshot) OptimizedTimes { @@ -561,7 +564,7 @@ func (s *dbSeries) Close() { // See Reset() for why these aren't finalized. s.id = nil - s.tags = ident.Tags{} + s.metadata = doc.Document{} s.uniqueIndex = 0 switch s.opts.CachePolicy() { @@ -605,11 +608,10 @@ func (s *dbSeries) Reset(opts DatabaseSeriesOptions) { // The same goes for the series tags. 
s.Lock() s.id = opts.ID - s.tags = opts.Tags + s.metadata = opts.Metadata s.uniqueIndex = opts.UniqueIndex s.cachedBlocks.Reset() s.buffer.Reset(databaseBufferResetOptions{ - ID: opts.ID, BlockRetriever: opts.BlockRetriever, Options: opts.Options, }) diff --git a/src/dbnode/storage/series/series_mock.go b/src/dbnode/storage/series/series_mock.go index 5f64d8b22b..23a1bba2a8 100644 --- a/src/dbnode/storage/series/series_mock.go +++ b/src/dbnode/storage/series/series_mock.go @@ -1,26 +1,6 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/m3db/m3/src/dbnode/storage/series (interfaces: DatabaseSeries,QueryableBlockRetriever) -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Package series is a generated GoMock package. 
package series @@ -33,6 +13,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/block" "github.com/m3db/m3/src/dbnode/ts" "github.com/m3db/m3/src/dbnode/x/xio" + "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" time0 "github.com/m3db/m3/src/x/time" @@ -176,6 +157,20 @@ func (mr *MockDatabaseSeriesMockRecorder) LoadBlock(arg0, arg1 interface{}) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadBlock", reflect.TypeOf((*MockDatabaseSeries)(nil).LoadBlock), arg0, arg1) } +// Metadata mocks base method +func (m *MockDatabaseSeries) Metadata() doc.Document { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Metadata") + ret0, _ := ret[0].(doc.Document) + return ret0 +} + +// Metadata indicates an expected call of Metadata +func (mr *MockDatabaseSeriesMockRecorder) Metadata() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Metadata", reflect.TypeOf((*MockDatabaseSeries)(nil).Metadata)) +} + // NumActiveBlocks mocks base method func (m *MockDatabaseSeries) NumActiveBlocks() int { m.ctrl.T.Helper() @@ -255,20 +250,6 @@ func (mr *MockDatabaseSeriesMockRecorder) Snapshot(arg0, arg1, arg2, arg3 interf return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Snapshot", reflect.TypeOf((*MockDatabaseSeries)(nil).Snapshot), arg0, arg1, arg2, arg3) } -// Tags mocks base method -func (m *MockDatabaseSeries) Tags() ident.Tags { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Tags") - ret0, _ := ret[0].(ident.Tags) - return ret0 -} - -// Tags indicates an expected call of Tags -func (mr *MockDatabaseSeriesMockRecorder) Tags() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tags", reflect.TypeOf((*MockDatabaseSeries)(nil).Tags)) -} - // Tick mocks base method func (m *MockDatabaseSeries) Tick(arg0 ShardBlockStateSnapshot, arg1 namespace.Context) (TickResult, error) { m.ctrl.T.Helper() diff --git a/src/dbnode/storage/series/series_test.go b/src/dbnode/storage/series/series_test.go index 13aa8d8349..1050fe79bb 100644 --- a/src/dbnode/storage/series/series_test.go +++ b/src/dbnode/storage/series/series_test.go @@ -27,6 +27,8 @@ import ( "testing" "time" + "github.com/m3db/m3/src/dbnode/storage/index/convert" + "github.com/m3db/m3/src/dbnode/clock" "github.com/m3db/m3/src/dbnode/encoding" "github.com/m3db/m3/src/dbnode/encoding/m3tsz" @@ -1148,10 +1150,13 @@ func TestSeriesOutOfOrderWritesAndRotate(t *testing.T) { expected []ts.Datapoint ) + metadata, err := convert.FromSeriesIDAndTags(id, tags) + require.NoError(t, err) + series := NewDatabaseSeries(DatabaseSeriesOptions{ - ID: id, - Tags: tags, - Options: opts, + ID: id, + Metadata: metadata, + Options: opts, }).(*dbSeries) for iter := 0; iter < numBlocks; iter++ { diff --git a/src/dbnode/storage/series/types.go b/src/dbnode/storage/series/types.go index 14428a3e14..ff033aa574 100644 --- a/src/dbnode/storage/series/types.go +++ b/src/dbnode/storage/series/types.go @@ -30,6 +30,7 @@ import ( "github.com/m3db/m3/src/dbnode/retention" "github.com/m3db/m3/src/dbnode/storage/block" "github.com/m3db/m3/src/dbnode/x/xio" + "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" "github.com/m3db/m3/src/x/instrument" @@ -41,7 +42,7 @@ import ( // DatabaseSeriesOptions is a set of options for creating a database series. 
type DatabaseSeriesOptions struct { ID ident.ID - Tags ident.Tags + Metadata doc.Document UniqueIndex uint64 BlockRetriever QueryableBlockRetriever OnRetrieveBlock block.OnRetrieveBlock @@ -57,8 +58,8 @@ type DatabaseSeries interface { // ID returns the ID of the series. ID() ident.ID - // Tags return the tags of the series. - Tags() ident.Tags + // Metadata returns the metadata of the series. + Metadata() doc.Document // UniqueIndex is the unique index for the series (for this current // process, unless the time series expires). diff --git a/src/dbnode/storage/shard.go b/src/dbnode/storage/shard.go index 5fc2256b6e..9e77b8f6bc 100644 --- a/src/dbnode/storage/shard.go +++ b/src/dbnode/storage/shard.go @@ -465,11 +465,13 @@ func (s *dbShard) OnRetrieveBlock( entry, err = s.newShardEntry(id, newTagsIterArg(tags)) if err != nil { // should never happen - s.logger.Error("[invariant violated] unable to create shardEntry from retrieved block data", - zap.Stringer("id", id), - zap.Time("startTime", startTime), - zap.Error(err), - ) + instrument.EmitAndLogInvariantViolation(s.opts.InstrumentOptions(), + func(logger *zap.Logger) { + logger.Error("unable to create shardEntry from retrieved block data", + zap.Stringer("id", id), + zap.Time("startTime", startTime), + zap.Error(err)) + }) return } @@ -477,7 +479,7 @@ func (s *dbShard) OnRetrieveBlock( // have been already been indexed when it was written copiedID := entry.Series.ID() copiedTagsIter := s.identifierPool.TagsIterator() - copiedTagsIter.Reset(entry.Series.Tags()) + copiedTagsIter.ResetFields(entry.Series.Metadata().Fields) s.insertQueue.Insert(dbShardInsert{ entry: entry, opts: dbShardInsertAsyncOptions{ @@ -912,7 +914,6 @@ func (s *dbShard) writeAndIndex( var ( commitLogSeriesID ident.ID - commitLogSeriesTags ident.Tags commitLogSeriesUniqueIndex uint64 // Err on the side of caution and always write to the commitlog if writing // async, since there is no information about whether the write succeeded @@ -931,7 +932,6 @@ func (s *dbShard) writeAndIndex( // as the commit log need to use the reference without the // overhead of ownership tracking. This makes taking a ref here safe. commitLogSeriesID = entry.Series.ID() - commitLogSeriesTags = entry.Series.Tags() commitLogSeriesUniqueIndex = entry.Index if err == nil && shouldReverseIndex { if entry.NeedsIndexUpdate(s.reverseIndex.BlockStartForWriteTime(timestamp)) { @@ -980,7 +980,6 @@ func (s *dbShard) writeAndIndex( // and adding ownership tracking to use it in the commit log // (i.e. registering a dependency on the context) is too expensive. commitLogSeriesID = result.copiedID - commitLogSeriesTags = result.copiedTags commitLogSeriesUniqueIndex = result.entry.Index } @@ -989,7 +988,6 @@ func (s *dbShard) writeAndIndex( UniqueIndex: commitLogSeriesUniqueIndex, Namespace: s.namespace.ID(), ID: commitLogSeriesID, - Tags: commitLogSeriesTags, Shard: s.shard, } @@ -1145,66 +1143,55 @@ func (s *dbShard) newShardEntry( tagsArgOpts tagsArgOptions, ) (*lookup.Entry, error) { // NB(r): As documented in storage/series.DatabaseSeries the series IDs - // are garbage collected, hence we cast the ID to a BytesID that can't be - // finalized. + // and metadata are garbage collected, hence we cast the ID to a BytesID + // that can't be finalized. // Since series are purged so infrequently the overhead of not releasing - // back an ID to a pool is amortized over a long period of time. + // back an ID and metadata to a pool is amortized over a long period of + // time. 
+ // Also of note, when a series is indexed in multiple index segments it is + // worth keeping the metadata around so it can be referenced to twice + // without creating a new array of []doc.Field for all the tags twice. + // Hence this stays on the storage/series.DatabaseSeries for when it needs + // to be re-indexed. var ( - seriesID ident.BytesID - seriesTags ident.Tags - err error + seriesID ident.BytesID + seriesMetadata doc.Document + err error ) - if id.IsNoFinalize() { - // If the ID is already marked as NoFinalize, meaning it won't be returned - // to any pools, then we can directly take reference to it. - // We make sure to use ident.BytesID for this ID to avoid inc/decref when - // accessing the ID since it's not pooled and therefore the safety is not - // required. - seriesID = ident.BytesID(id.Bytes()) - } else { - seriesID = ident.BytesID(append([]byte(nil), id.Bytes()...)) - seriesID.NoFinalize() - } switch tagsArgOpts.arg { case tagsIterArg: - // NB(r): Take a duplicate so that we don't double close the tag iterator - // passed to this method - tagsIter := tagsArgOpts.tagsIter.Duplicate() - - // Ensure tag iterator at start - if tagsIter.CurrentIndex() != 0 { - return nil, errNewShardEntryTagsIterNotAtIndexZero - } + // NB(r): Rewind so we record the tag iterator from the beginning. + tagsIter := tagsArgOpts.tagsIter + tagsIter.Rewind() // Pass nil for the identifier pool because the pool will force us to use an array // with a large capacity to store the tags. Since these tags are long-lived, it's // better to allocate an array of the exact size to save memory. - seriesTags, err = convert.TagsFromTagsIter(seriesID, tagsIter, nil) + seriesMetadata, err = convert.FromSeriesIDAndTagIter(seriesID, tagsIter) tagsIter.Close() if err != nil { return nil, err } - if err := convert.ValidateSeries(seriesID, seriesTags); err != nil { + case tagsArg: + seriesMetadata, err = convert.FromSeriesIDAndTags(id, tagsArgOpts.tags) + if err != nil { return nil, err } - case tagsArg: - seriesTags = tagsArgOpts.tags - default: return nil, errNewShardEntryTagsTypeInvalid } - // Don't put tags back in a pool since the merge logic may still have a - // handle on these. - seriesTags.NoFinalize() + + // Use the same bytes as the series metadata for the ID. + seriesID = ident.BytesID(seriesMetadata.ID) uniqueIndex := s.increasingIndex.nextIndex() newSeries := s.seriesPool.Get() newSeries.Reset(series.DatabaseSeriesOptions{ ID: seriesID, - Tags: seriesTags, + Metadata: seriesMetadata, UniqueIndex: uniqueIndex, BlockRetriever: s.seriesBlockRetriever, OnRetrieveBlock: s.seriesOnRetrieveBlock, @@ -1215,9 +1202,8 @@ func (s *dbShard) newShardEntry( } type insertAsyncResult struct { - wg *sync.WaitGroup - copiedID ident.ID - copiedTags ident.Tags + wg *sync.WaitGroup + copiedID ident.ID // entry is not guaranteed to be the final entry // inserted into the shard map in case there is already // an existing entry waiting in the insert queue @@ -1284,9 +1270,8 @@ func (s *dbShard) insertSeriesAsyncBatched( return insertAsyncResult{ wg: wg, // Make sure to return the copied ID from the new series - copiedID: entry.Series.ID(), - copiedTags: entry.Series.Tags(), - entry: entry, + copiedID: entry.Series.ID(), + entry: entry, }, err } @@ -1506,24 +1491,13 @@ func (s *dbShard) insertSeriesBatch(inserts []dbShardInsert) error { // this method (insertSeriesBatch) via `entryRefCountIncremented` mechanism. entry.OnIndexPrepare() - // Don't insert cold index writes into the index insert queue. 
- id := entry.Series.ID() - tags := entry.Series.Tags().Values() - - var d doc.Document - d.ID = id.Bytes() // IDs from shard entries are always set NoFinalize - d.Fields = make(doc.Fields, 0, len(tags)) - for _, tag := range tags { - d.Fields = append(d.Fields, doc.Field{ - Name: tag.Name.Bytes(), // Tags from shard entries are always set NoFinalize - Value: tag.Value.Bytes(), // Tags from shard entries are always set NoFinalize - }) - } - indexBatch.Append(index.WriteBatchEntry{ + writeBatchEntry := index.WriteBatchEntry{ Timestamp: pendingIndex.timestamp, OnIndexSeries: entry, EnqueuedAt: pendingIndex.enqueuedAt, - }, d) + } + + indexBatch.Append(writeBatchEntry, entry.Series.Metadata()) } if inserts[i].opts.hasPendingRetrievedBlock { @@ -2244,7 +2218,7 @@ func (s *dbShard) ColdFlush( // series and add them to the resources for further processing. s.forEachShardEntry(func(entry *lookup.Entry) bool { curr := entry.Series - seriesID := curr.ID() + seriesMetadata := curr.Metadata() blockStarts := curr.ColdFlushBlockStarts(blockStatesSnapshot) blockStarts.ForEach(func(t xtime.UnixNano) { // Cold flushes can only happen on blockStarts that have been @@ -2266,9 +2240,12 @@ func (s *dbShard) ColdFlush( seriesList = newIDList(idElementPool) dirtySeriesToWrite[t] = seriesList } - element := seriesList.PushBack(seriesID) + element := seriesList.PushBack(seriesMetadata) - dirtySeries.Set(idAndBlockStart{blockStart: t, id: seriesID}, element) + dirtySeries.Set(idAndBlockStart{ + blockStart: t, + id: seriesMetadata.ID, + }, element) }) return true @@ -2530,17 +2507,6 @@ func (s *dbShard) Repair( return repairer.Repair(ctx, nsCtx, nsMeta, tr, s) } -func (s *dbShard) TagsFromSeriesID(seriesID ident.ID) (ident.Tags, bool, error) { - s.RLock() - entry, _, err := s.lookupEntryWithLock(seriesID) - s.RUnlock() - if entry == nil || err != nil { - return ident.Tags{}, false, err - } - - return entry.Series.Tags(), true, nil -} - func (s *dbShard) BootstrapState() BootstrapState { s.RLock() bs := s.bootstrapState diff --git a/src/dbnode/storage/shard_fetch_blocks_metadata_test.go b/src/dbnode/storage/shard_fetch_blocks_metadata_test.go index ecad3df4b2..8d282c24af 100644 --- a/src/dbnode/storage/shard_fetch_blocks_metadata_test.go +++ b/src/dbnode/storage/shard_fetch_blocks_metadata_test.go @@ -30,6 +30,7 @@ import ( "github.com/m3db/m3/src/dbnode/digest" "github.com/m3db/m3/src/dbnode/generated/proto/pagetoken" + "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/persist/fs" "github.com/m3db/m3/src/dbnode/storage/block" "github.com/m3db/m3/src/dbnode/storage/series" @@ -194,7 +195,9 @@ func TestShardFetchBlocksMetadataV2WithSeriesCachePolicyNotCacheAll(t *testing.T bytes := checked.NewBytes(data, nil) bytes.IncRef() - err = writer.Write(id, ident.Tags{}, bytes, checksum) + meta := persist.NewMetadataFromIDAndTags(id, ident.Tags{}, + persist.MetadataOptions{}) + err = writer.Write(meta, bytes, checksum) require.NoError(t, err) blockMetadataResult := block.NewFetchBlockMetadataResult(at, diff --git a/src/dbnode/storage/shard_test.go b/src/dbnode/storage/shard_test.go index 6c675ba4fc..02d0c4ef73 100644 --- a/src/dbnode/storage/shard_test.go +++ b/src/dbnode/storage/shard_test.go @@ -90,7 +90,6 @@ func testDatabaseShardWithIndexFn( func addMockSeries(ctrl *gomock.Controller, shard *dbShard, id ident.ID, tags ident.Tags, index uint64) *series.MockDatabaseSeries { series := series.NewMockDatabaseSeries(ctrl) series.EXPECT().ID().Return(id).AnyTimes() - 
series.EXPECT().Tags().Return(tags).AnyTimes() series.EXPECT().IsEmpty().Return(false).AnyTimes() shard.Lock() shard.insertNewShardEntryWithLock(lookup.NewEntry(series, index)) @@ -420,7 +419,7 @@ func TestShardFlushSeriesFlushError(t *testing.T) { var closed bool flush := persist.NewMockFlushPreparer(ctrl) prepared := persist.PreparedDataPersist{ - Persist: func(ident.ID, ident.Tags, ts.Segment, uint32) error { return nil }, + Persist: func(persist.Metadata, ts.Segment, uint32) error { return nil }, Close: func() error { closed = true; return nil }, } prepareOpts := xtest.CmpMatcher(persist.DataPrepareOptions{ @@ -497,7 +496,7 @@ func TestShardFlushSeriesFlushSuccess(t *testing.T) { var closed bool flush := persist.NewMockFlushPreparer(ctrl) prepared := persist.PreparedDataPersist{ - Persist: func(ident.ID, ident.Tags, ts.Segment, uint32) error { return nil }, + Persist: func(persist.Metadata, ts.Segment, uint32) error { return nil }, Close: func() error { closed = true; return nil }, } @@ -798,7 +797,7 @@ func TestShardSnapshotSeriesSnapshotSuccess(t *testing.T) { var closed bool snapshotPreparer := persist.NewMockSnapshotPreparer(ctrl) prepared := persist.PreparedDataPersist{ - Persist: func(ident.ID, ident.Tags, ts.Segment, uint32) error { return nil }, + Persist: func(persist.Metadata, ts.Segment, uint32) error { return nil }, Close: func() error { closed = true; return nil }, } @@ -1729,19 +1728,6 @@ func TestShardNewEntryDoesNotAlterIDOrTags(t *testing.T) { assert.True(t, entry.Series.ID().Equal(seriesID)) // NB(r): Use &slice[0] to get a pointer to the very first byte, i.e. data section assert.False(t, unsafe.Pointer(&entryIDBytes[0]) == unsafe.Pointer(&seriesIDBytes[0])) - - // Ensure Tags equal and NOT same ref for tags - assert.True(t, entry.Series.Tags().Equal(seriesTags)) - require.Equal(t, 1, len(entry.Series.Tags().Values())) - - entryTagNameBytes := entry.Series.Tags().Values()[0].Name.Bytes() - entryTagValueBytes := entry.Series.Tags().Values()[0].Value.Bytes() - seriesTagNameBytes := seriesTags.Values()[0].Name.Bytes() - seriesTagValueBytes := seriesTags.Values()[0].Value.Bytes() - - // NB(r): Use &slice[0] to get a pointer to the very first byte, i.e. data section - assert.False(t, unsafe.Pointer(&entryTagNameBytes[0]) == unsafe.Pointer(&seriesTagNameBytes[0])) - assert.False(t, unsafe.Pointer(&entryTagValueBytes[0]) == unsafe.Pointer(&seriesTagValueBytes[0])) } // TestShardNewEntryTakesRefToNoFinalizeID ensures that when an ID is @@ -1792,19 +1778,6 @@ func TestShardNewEntryTakesRefToNoFinalizeID(t *testing.T) { assert.True(t, entry.Series.ID().Equal(seriesID)) // NB(r): Use &slice[0] to get a pointer to the very first byte, i.e. data section assert.True(t, unsafe.Pointer(&entryIDBytes[0]) == unsafe.Pointer(&seriesIDBytes[0])) - - // Ensure Tags equal and NOT same ref for tags - assert.True(t, entry.Series.Tags().Equal(seriesTags)) - require.Equal(t, 1, len(entry.Series.Tags().Values())) - - entryTagNameBytes := entry.Series.Tags().Values()[0].Name.Bytes() - entryTagValueBytes := entry.Series.Tags().Values()[0].Value.Bytes() - seriesTagNameBytes := seriesTags.Values()[0].Name.Bytes() - seriesTagValueBytes := seriesTags.Values()[0].Value.Bytes() - - // NB(r): Use &slice[0] to get a pointer to the very first byte, i.e. 
data section - assert.False(t, unsafe.Pointer(&entryTagNameBytes[0]) == unsafe.Pointer(&seriesTagNameBytes[0])) - assert.False(t, unsafe.Pointer(&entryTagValueBytes[0]) == unsafe.Pointer(&seriesTagValueBytes[0])) } func TestShardIterateBatchSize(t *testing.T) { diff --git a/src/dbnode/storage/storage_mock.go b/src/dbnode/storage/storage_mock.go index f456ebe850..b2bbfe04c5 100644 --- a/src/dbnode/storage/storage_mock.go +++ b/src/dbnode/storage/storage_mock.go @@ -1,60 +1,38 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/m3db/m3/src/dbnode/storage/types.go - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Source: /Users/r/go/src/github.com/m3db/m3/src/dbnode/storage/types.go // Package storage is a generated GoMock package. 
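// Editor's sketch, not part of the patch: callers that used to pass
// (ident.ID, ident.Tags) to fileset writers and to the prepared Persist callback
// now pass a single persist.Metadata value, as in the shard test hunk above. A
// minimal usage sketch; id, tags, bytes (checked.Bytes) and checksum (uint32) are
// assumed to come from the surrounding code, and MetadataOptions is left at its
// zero value as in the test.

meta := persist.NewMetadataFromIDAndTags(id, tags, persist.MetadataOptions{})
if err := writer.Write(meta, bytes, checksum); err != nil {
	return err
}
// The prepared persist function has the matching shape:
//   Persist: func(persist.Metadata, ts.Segment, uint32) error { ... }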
package storage import ( - "reflect" - "sync" - "time" - - "github.com/m3db/m3/src/dbnode/clock" - "github.com/m3db/m3/src/dbnode/encoding" - "github.com/m3db/m3/src/dbnode/namespace" - "github.com/m3db/m3/src/dbnode/persist" - "github.com/m3db/m3/src/dbnode/persist/fs" - "github.com/m3db/m3/src/dbnode/persist/fs/commitlog" - "github.com/m3db/m3/src/dbnode/runtime" - "github.com/m3db/m3/src/dbnode/sharding" - "github.com/m3db/m3/src/dbnode/storage/block" - "github.com/m3db/m3/src/dbnode/storage/bootstrap" - "github.com/m3db/m3/src/dbnode/storage/bootstrap/result" - "github.com/m3db/m3/src/dbnode/storage/index" - "github.com/m3db/m3/src/dbnode/storage/repair" - "github.com/m3db/m3/src/dbnode/storage/series" - "github.com/m3db/m3/src/dbnode/ts" - "github.com/m3db/m3/src/dbnode/x/xio" - "github.com/m3db/m3/src/dbnode/x/xpool" - "github.com/m3db/m3/src/x/context" - "github.com/m3db/m3/src/x/ident" - "github.com/m3db/m3/src/x/instrument" - "github.com/m3db/m3/src/x/mmap" - "github.com/m3db/m3/src/x/pool" - sync0 "github.com/m3db/m3/src/x/sync" - time0 "github.com/m3db/m3/src/x/time" - - "github.com/golang/mock/gomock" + gomock "github.com/golang/mock/gomock" + clock "github.com/m3db/m3/src/dbnode/clock" + encoding "github.com/m3db/m3/src/dbnode/encoding" + namespace "github.com/m3db/m3/src/dbnode/namespace" + persist "github.com/m3db/m3/src/dbnode/persist" + fs "github.com/m3db/m3/src/dbnode/persist/fs" + commitlog "github.com/m3db/m3/src/dbnode/persist/fs/commitlog" + runtime "github.com/m3db/m3/src/dbnode/runtime" + sharding "github.com/m3db/m3/src/dbnode/sharding" + block "github.com/m3db/m3/src/dbnode/storage/block" + bootstrap "github.com/m3db/m3/src/dbnode/storage/bootstrap" + result "github.com/m3db/m3/src/dbnode/storage/bootstrap/result" + index "github.com/m3db/m3/src/dbnode/storage/index" + repair "github.com/m3db/m3/src/dbnode/storage/repair" + series "github.com/m3db/m3/src/dbnode/storage/series" + ts "github.com/m3db/m3/src/dbnode/ts" + xio "github.com/m3db/m3/src/dbnode/x/xio" + xpool "github.com/m3db/m3/src/dbnode/x/xpool" + context "github.com/m3db/m3/src/x/context" + ident "github.com/m3db/m3/src/x/ident" + instrument "github.com/m3db/m3/src/x/instrument" + mmap "github.com/m3db/m3/src/x/mmap" + pool "github.com/m3db/m3/src/x/pool" + sync "github.com/m3db/m3/src/x/sync" + time "github.com/m3db/m3/src/x/time" + reflect "reflect" + sync0 "sync" + time0 "time" ) // MockIndexedErrorHandler is a mock of IndexedErrorHandler interface @@ -227,7 +205,7 @@ func (mr *MockDatabaseMockRecorder) Terminate() *gomock.Call { } // Write mocks base method -func (m *MockDatabase) Write(ctx context.Context, namespace, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { +func (m *MockDatabase) Write(ctx context.Context, namespace, id ident.ID, timestamp time0.Time, value float64, unit time.Unit, annotation []byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Write", ctx, namespace, id, timestamp, value, unit, annotation) ret0, _ := ret[0].(error) @@ -241,7 +219,7 @@ func (mr *MockDatabaseMockRecorder) Write(ctx, namespace, id, timestamp, value, } // WriteTagged mocks base method -func (m *MockDatabase) WriteTagged(ctx context.Context, namespace, id ident.ID, tags ident.TagIterator, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { +func (m *MockDatabase) WriteTagged(ctx context.Context, namespace, id ident.ID, tags ident.TagIterator, timestamp time0.Time, value float64, unit time.Unit, annotation []byte) error { 
m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteTagged", ctx, namespace, id, tags, timestamp, value, unit, annotation) ret0, _ := ret[0].(error) @@ -328,7 +306,7 @@ func (mr *MockDatabaseMockRecorder) AggregateQuery(ctx, namespace, query, opts i } // ReadEncoded mocks base method -func (m *MockDatabase) ReadEncoded(ctx context.Context, namespace, id ident.ID, start, end time.Time) ([][]xio.BlockReader, error) { +func (m *MockDatabase) ReadEncoded(ctx context.Context, namespace, id ident.ID, start, end time0.Time) ([][]xio.BlockReader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadEncoded", ctx, namespace, id, start, end) ret0, _ := ret[0].([][]xio.BlockReader) @@ -343,7 +321,7 @@ func (mr *MockDatabaseMockRecorder) ReadEncoded(ctx, namespace, id, start, end i } // FetchBlocks mocks base method -func (m *MockDatabase) FetchBlocks(ctx context.Context, namespace ident.ID, shard uint32, id ident.ID, starts []time.Time) ([]block.FetchBlockResult, error) { +func (m *MockDatabase) FetchBlocks(ctx context.Context, namespace ident.ID, shard uint32, id ident.ID, starts []time0.Time) ([]block.FetchBlockResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocks", ctx, namespace, shard, id, starts) ret0, _ := ret[0].([]block.FetchBlockResult) @@ -358,7 +336,7 @@ func (mr *MockDatabaseMockRecorder) FetchBlocks(ctx, namespace, shard, id, start } // FetchBlocksMetadataV2 mocks base method -func (m *MockDatabase) FetchBlocksMetadataV2(ctx context.Context, namespace ident.ID, shard uint32, start, end time.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { +func (m *MockDatabase) FetchBlocksMetadataV2(ctx context.Context, namespace ident.ID, shard uint32, start, end time0.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocksMetadataV2", ctx, namespace, shard, start, end, limit, pageToken, opts) ret0, _ := ret[0].(block.FetchBlocksMetadataResults) @@ -473,7 +451,7 @@ func (mr *MockDatabaseMockRecorder) BootstrapState() *gomock.Call { } // FlushState mocks base method -func (m *MockDatabase) FlushState(namespace ident.ID, shardID uint32, blockStart time.Time) (fileOpState, error) { +func (m *MockDatabase) FlushState(namespace ident.ID, shardID uint32, blockStart time0.Time) (fileOpState, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FlushState", namespace, shardID, blockStart) ret0, _ := ret[0].(fileOpState) @@ -622,7 +600,7 @@ func (mr *MockdatabaseMockRecorder) Terminate() *gomock.Call { } // Write mocks base method -func (m *Mockdatabase) Write(ctx context.Context, namespace, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { +func (m *Mockdatabase) Write(ctx context.Context, namespace, id ident.ID, timestamp time0.Time, value float64, unit time.Unit, annotation []byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Write", ctx, namespace, id, timestamp, value, unit, annotation) ret0, _ := ret[0].(error) @@ -636,7 +614,7 @@ func (mr *MockdatabaseMockRecorder) Write(ctx, namespace, id, timestamp, value, } // WriteTagged mocks base method -func (m *Mockdatabase) WriteTagged(ctx context.Context, namespace, id ident.ID, tags ident.TagIterator, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { +func (m *Mockdatabase) WriteTagged(ctx context.Context, namespace, id ident.ID, tags ident.TagIterator, timestamp 
time0.Time, value float64, unit time.Unit, annotation []byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteTagged", ctx, namespace, id, tags, timestamp, value, unit, annotation) ret0, _ := ret[0].(error) @@ -723,7 +701,7 @@ func (mr *MockdatabaseMockRecorder) AggregateQuery(ctx, namespace, query, opts i } // ReadEncoded mocks base method -func (m *Mockdatabase) ReadEncoded(ctx context.Context, namespace, id ident.ID, start, end time.Time) ([][]xio.BlockReader, error) { +func (m *Mockdatabase) ReadEncoded(ctx context.Context, namespace, id ident.ID, start, end time0.Time) ([][]xio.BlockReader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadEncoded", ctx, namespace, id, start, end) ret0, _ := ret[0].([][]xio.BlockReader) @@ -738,7 +716,7 @@ func (mr *MockdatabaseMockRecorder) ReadEncoded(ctx, namespace, id, start, end i } // FetchBlocks mocks base method -func (m *Mockdatabase) FetchBlocks(ctx context.Context, namespace ident.ID, shard uint32, id ident.ID, starts []time.Time) ([]block.FetchBlockResult, error) { +func (m *Mockdatabase) FetchBlocks(ctx context.Context, namespace ident.ID, shard uint32, id ident.ID, starts []time0.Time) ([]block.FetchBlockResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocks", ctx, namespace, shard, id, starts) ret0, _ := ret[0].([]block.FetchBlockResult) @@ -753,7 +731,7 @@ func (mr *MockdatabaseMockRecorder) FetchBlocks(ctx, namespace, shard, id, start } // FetchBlocksMetadataV2 mocks base method -func (m *Mockdatabase) FetchBlocksMetadataV2(ctx context.Context, namespace ident.ID, shard uint32, start, end time.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { +func (m *Mockdatabase) FetchBlocksMetadataV2(ctx context.Context, namespace ident.ID, shard uint32, start, end time0.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocksMetadataV2", ctx, namespace, shard, start, end, limit, pageToken, opts) ret0, _ := ret[0].(block.FetchBlocksMetadataResults) @@ -868,7 +846,7 @@ func (mr *MockdatabaseMockRecorder) BootstrapState() *gomock.Call { } // FlushState mocks base method -func (m *Mockdatabase) FlushState(namespace ident.ID, shardID uint32, blockStart time.Time) (fileOpState, error) { +func (m *Mockdatabase) FlushState(namespace ident.ID, shardID uint32, blockStart time0.Time) (fileOpState, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FlushState", namespace, shardID, blockStart) ret0, _ := ret[0].(fileOpState) @@ -1224,7 +1202,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) OwnedShards() *gomock.Call { } // Tick mocks base method -func (m *MockdatabaseNamespace) Tick(c context.Cancellable, startTime time.Time) error { +func (m *MockdatabaseNamespace) Tick(c context.Cancellable, startTime time0.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Tick", c, startTime) ret0, _ := ret[0].(error) @@ -1238,7 +1216,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) Tick(c, startTime interface{}) *gom } // Write mocks base method -func (m *MockdatabaseNamespace) Write(ctx context.Context, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) (ts.Series, bool, error) { +func (m *MockdatabaseNamespace) Write(ctx context.Context, id ident.ID, timestamp time0.Time, value float64, unit time.Unit, annotation []byte) (ts.Series, bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Write", 
ctx, id, timestamp, value, unit, annotation) ret0, _ := ret[0].(ts.Series) @@ -1254,7 +1232,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) Write(ctx, id, timestamp, value, un } // WriteTagged mocks base method -func (m *MockdatabaseNamespace) WriteTagged(ctx context.Context, id ident.ID, tags ident.TagIterator, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) (ts.Series, bool, error) { +func (m *MockdatabaseNamespace) WriteTagged(ctx context.Context, id ident.ID, tags ident.TagIterator, timestamp time0.Time, value float64, unit time.Unit, annotation []byte) (ts.Series, bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteTagged", ctx, id, tags, timestamp, value, unit, annotation) ret0, _ := ret[0].(ts.Series) @@ -1300,7 +1278,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) AggregateQuery(ctx, query, opts int } // ReadEncoded mocks base method -func (m *MockdatabaseNamespace) ReadEncoded(ctx context.Context, id ident.ID, start, end time.Time) ([][]xio.BlockReader, error) { +func (m *MockdatabaseNamespace) ReadEncoded(ctx context.Context, id ident.ID, start, end time0.Time) ([][]xio.BlockReader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadEncoded", ctx, id, start, end) ret0, _ := ret[0].([][]xio.BlockReader) @@ -1315,7 +1293,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) ReadEncoded(ctx, id, start, end int } // FetchBlocks mocks base method -func (m *MockdatabaseNamespace) FetchBlocks(ctx context.Context, shardID uint32, id ident.ID, starts []time.Time) ([]block.FetchBlockResult, error) { +func (m *MockdatabaseNamespace) FetchBlocks(ctx context.Context, shardID uint32, id ident.ID, starts []time0.Time) ([]block.FetchBlockResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocks", ctx, shardID, id, starts) ret0, _ := ret[0].([]block.FetchBlockResult) @@ -1330,7 +1308,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) FetchBlocks(ctx, shardID, id, start } // FetchBlocksMetadataV2 mocks base method -func (m *MockdatabaseNamespace) FetchBlocksMetadataV2(ctx context.Context, shardID uint32, start, end time.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { +func (m *MockdatabaseNamespace) FetchBlocksMetadataV2(ctx context.Context, shardID uint32, start, end time0.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocksMetadataV2", ctx, shardID, start, end, limit, pageToken, opts) ret0, _ := ret[0].(block.FetchBlocksMetadataResults) @@ -1375,7 +1353,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) Bootstrap(ctx, bootstrapResult inte } // WarmFlush mocks base method -func (m *MockdatabaseNamespace) WarmFlush(blockStart time.Time, flush persist.FlushPreparer) error { +func (m *MockdatabaseNamespace) WarmFlush(blockStart time0.Time, flush persist.FlushPreparer) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WarmFlush", blockStart, flush) ret0, _ := ret[0].(error) @@ -1417,7 +1395,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) ColdFlush(flush interface{}) *gomoc } // Snapshot mocks base method -func (m *MockdatabaseNamespace) Snapshot(blockStart, snapshotTime time.Time, flush persist.SnapshotPreparer) error { +func (m *MockdatabaseNamespace) Snapshot(blockStart, snapshotTime time0.Time, flush persist.SnapshotPreparer) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Snapshot", blockStart, snapshotTime, flush) ret0, _ := 
ret[0].(error) @@ -1431,7 +1409,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) Snapshot(blockStart, snapshotTime, } // NeedsFlush mocks base method -func (m *MockdatabaseNamespace) NeedsFlush(alignedInclusiveStart, alignedInclusiveEnd time.Time) (bool, error) { +func (m *MockdatabaseNamespace) NeedsFlush(alignedInclusiveStart, alignedInclusiveEnd time0.Time) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NeedsFlush", alignedInclusiveStart, alignedInclusiveEnd) ret0, _ := ret[0].(bool) @@ -1461,7 +1439,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) Truncate() *gomock.Call { } // Repair mocks base method -func (m *MockdatabaseNamespace) Repair(repairer databaseShardRepairer, tr time0.Range) error { +func (m *MockdatabaseNamespace) Repair(repairer databaseShardRepairer, tr time.Range) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Repair", repairer, tr) ret0, _ := ret[0].(error) @@ -1489,7 +1467,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) BootstrapState() *gomock.Call { } // FlushState mocks base method -func (m *MockdatabaseNamespace) FlushState(shardID uint32, blockStart time.Time) (fileOpState, error) { +func (m *MockdatabaseNamespace) FlushState(shardID uint32, blockStart time0.Time) (fileOpState, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FlushState", shardID, blockStart) ret0, _ := ret[0].(fileOpState) @@ -1678,7 +1656,7 @@ func (mr *MockdatabaseShardMockRecorder) BootstrapState() *gomock.Call { } // OnEvictedFromWiredList mocks base method -func (m *MockdatabaseShard) OnEvictedFromWiredList(id ident.ID, blockStart time.Time) { +func (m *MockdatabaseShard) OnEvictedFromWiredList(id ident.ID, blockStart time0.Time) { m.ctrl.T.Helper() m.ctrl.Call(m, "OnEvictedFromWiredList", id, blockStart) } @@ -1704,7 +1682,7 @@ func (mr *MockdatabaseShardMockRecorder) Close() *gomock.Call { } // Tick mocks base method -func (m *MockdatabaseShard) Tick(c context.Cancellable, startTime time.Time, nsCtx namespace.Context) (tickResult, error) { +func (m *MockdatabaseShard) Tick(c context.Cancellable, startTime time0.Time, nsCtx namespace.Context) (tickResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Tick", c, startTime, nsCtx) ret0, _ := ret[0].(tickResult) @@ -1719,7 +1697,7 @@ func (mr *MockdatabaseShardMockRecorder) Tick(c, startTime, nsCtx interface{}) * } // Write mocks base method -func (m *MockdatabaseShard) Write(ctx context.Context, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte, wOpts series.WriteOptions) (ts.Series, bool, error) { +func (m *MockdatabaseShard) Write(ctx context.Context, id ident.ID, timestamp time0.Time, value float64, unit time.Unit, annotation []byte, wOpts series.WriteOptions) (ts.Series, bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Write", ctx, id, timestamp, value, unit, annotation, wOpts) ret0, _ := ret[0].(ts.Series) @@ -1735,7 +1713,7 @@ func (mr *MockdatabaseShardMockRecorder) Write(ctx, id, timestamp, value, unit, } // WriteTagged mocks base method -func (m *MockdatabaseShard) WriteTagged(ctx context.Context, id ident.ID, tags ident.TagIterator, timestamp time.Time, value float64, unit time0.Unit, annotation []byte, wOpts series.WriteOptions) (ts.Series, bool, error) { +func (m *MockdatabaseShard) WriteTagged(ctx context.Context, id ident.ID, tags ident.TagIterator, timestamp time0.Time, value float64, unit time.Unit, annotation []byte, wOpts series.WriteOptions) (ts.Series, bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteTagged", ctx, id, tags, timestamp, value, unit, 
annotation, wOpts) ret0, _ := ret[0].(ts.Series) @@ -1751,7 +1729,7 @@ func (mr *MockdatabaseShardMockRecorder) WriteTagged(ctx, id, tags, timestamp, v } // ReadEncoded mocks base method -func (m *MockdatabaseShard) ReadEncoded(ctx context.Context, id ident.ID, start, end time.Time, nsCtx namespace.Context) ([][]xio.BlockReader, error) { +func (m *MockdatabaseShard) ReadEncoded(ctx context.Context, id ident.ID, start, end time0.Time, nsCtx namespace.Context) ([][]xio.BlockReader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadEncoded", ctx, id, start, end, nsCtx) ret0, _ := ret[0].([][]xio.BlockReader) @@ -1766,7 +1744,7 @@ func (mr *MockdatabaseShardMockRecorder) ReadEncoded(ctx, id, start, end, nsCtx } // FetchBlocks mocks base method -func (m *MockdatabaseShard) FetchBlocks(ctx context.Context, id ident.ID, starts []time.Time, nsCtx namespace.Context) ([]block.FetchBlockResult, error) { +func (m *MockdatabaseShard) FetchBlocks(ctx context.Context, id ident.ID, starts []time0.Time, nsCtx namespace.Context) ([]block.FetchBlockResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocks", ctx, id, starts, nsCtx) ret0, _ := ret[0].([]block.FetchBlockResult) @@ -1781,7 +1759,7 @@ func (mr *MockdatabaseShardMockRecorder) FetchBlocks(ctx, id, starts, nsCtx inte } // FetchBlocksForColdFlush mocks base method -func (m *MockdatabaseShard) FetchBlocksForColdFlush(ctx context.Context, seriesID ident.ID, start time.Time, version int, nsCtx namespace.Context) (block.FetchBlockResult, error) { +func (m *MockdatabaseShard) FetchBlocksForColdFlush(ctx context.Context, seriesID ident.ID, start time0.Time, version int, nsCtx namespace.Context) (block.FetchBlockResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocksForColdFlush", ctx, seriesID, start, version, nsCtx) ret0, _ := ret[0].(block.FetchBlockResult) @@ -1796,7 +1774,7 @@ func (mr *MockdatabaseShardMockRecorder) FetchBlocksForColdFlush(ctx, seriesID, } // FetchBlocksMetadataV2 mocks base method -func (m *MockdatabaseShard) FetchBlocksMetadataV2(ctx context.Context, start, end time.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { +func (m *MockdatabaseShard) FetchBlocksMetadataV2(ctx context.Context, start, end time0.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocksMetadataV2", ctx, start, end, limit, pageToken, opts) ret0, _ := ret[0].(block.FetchBlocksMetadataResults) @@ -1866,7 +1844,7 @@ func (mr *MockdatabaseShardMockRecorder) LoadBlocks(series interface{}) *gomock. 
} // WarmFlush mocks base method -func (m *MockdatabaseShard) WarmFlush(blockStart time.Time, flush persist.FlushPreparer, nsCtx namespace.Context) error { +func (m *MockdatabaseShard) WarmFlush(blockStart time0.Time, flush persist.FlushPreparer, nsCtx namespace.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WarmFlush", blockStart, flush, nsCtx) ret0, _ := ret[0].(error) @@ -1895,7 +1873,7 @@ func (mr *MockdatabaseShardMockRecorder) ColdFlush(flush, resources, nsCtx, onFl } // Snapshot mocks base method -func (m *MockdatabaseShard) Snapshot(blockStart, snapshotStart time.Time, flush persist.SnapshotPreparer, nsCtx namespace.Context) error { +func (m *MockdatabaseShard) Snapshot(blockStart, snapshotStart time0.Time, flush persist.SnapshotPreparer, nsCtx namespace.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Snapshot", blockStart, snapshotStart, flush, nsCtx) ret0, _ := ret[0].(error) @@ -1909,7 +1887,7 @@ func (mr *MockdatabaseShardMockRecorder) Snapshot(blockStart, snapshotStart, flu } // FlushState mocks base method -func (m *MockdatabaseShard) FlushState(blockStart time.Time) (fileOpState, error) { +func (m *MockdatabaseShard) FlushState(blockStart time0.Time) (fileOpState, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FlushState", blockStart) ret0, _ := ret[0].(fileOpState) @@ -1924,7 +1902,7 @@ func (mr *MockdatabaseShardMockRecorder) FlushState(blockStart interface{}) *gom } // CleanupExpiredFileSets mocks base method -func (m *MockdatabaseShard) CleanupExpiredFileSets(earliestToRetain time.Time) error { +func (m *MockdatabaseShard) CleanupExpiredFileSets(earliestToRetain time0.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CleanupExpiredFileSets", earliestToRetain) ret0, _ := ret[0].(error) @@ -1952,7 +1930,7 @@ func (mr *MockdatabaseShardMockRecorder) CleanupCompactedFileSets() *gomock.Call } // Repair mocks base method -func (m *MockdatabaseShard) Repair(ctx context.Context, nsCtx namespace.Context, nsMeta namespace.Metadata, tr time0.Range, repairer databaseShardRepairer) (repair.MetadataComparisonResult, error) { +func (m *MockdatabaseShard) Repair(ctx context.Context, nsCtx namespace.Context, nsMeta namespace.Metadata, tr time.Range, repairer databaseShardRepairer) (repair.MetadataComparisonResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Repair", ctx, nsCtx, nsMeta, tr, repairer) ret0, _ := ret[0].(repair.MetadataComparisonResult) @@ -1966,22 +1944,6 @@ func (mr *MockdatabaseShardMockRecorder) Repair(ctx, nsCtx, nsMeta, tr, repairer return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Repair", reflect.TypeOf((*MockdatabaseShard)(nil).Repair), ctx, nsCtx, nsMeta, tr, repairer) } -// TagsFromSeriesID mocks base method -func (m *MockdatabaseShard) TagsFromSeriesID(seriesID ident.ID) (ident.Tags, bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TagsFromSeriesID", seriesID) - ret0, _ := ret[0].(ident.Tags) - ret1, _ := ret[1].(bool) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// TagsFromSeriesID indicates an expected call of TagsFromSeriesID -func (mr *MockdatabaseShardMockRecorder) TagsFromSeriesID(seriesID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagsFromSeriesID", reflect.TypeOf((*MockdatabaseShard)(nil).TagsFromSeriesID), seriesID) -} - // SeriesReadWriteRef mocks base method func (m *MockdatabaseShard) SeriesReadWriteRef(id ident.ID, tags ident.TagIterator, opts ShardSeriesReadWriteRefOptions) (SeriesReadWriteRef, error) { 
m.ctrl.T.Helper() @@ -2070,10 +2032,10 @@ func (mr *MockNamespaceIndexMockRecorder) AssignShardSet(shardSet interface{}) * } // BlockStartForWriteTime mocks base method -func (m *MockNamespaceIndex) BlockStartForWriteTime(writeTime time.Time) time0.UnixNano { +func (m *MockNamespaceIndex) BlockStartForWriteTime(writeTime time0.Time) time.UnixNano { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BlockStartForWriteTime", writeTime) - ret0, _ := ret[0].(time0.UnixNano) + ret0, _ := ret[0].(time.UnixNano) return ret0 } @@ -2084,7 +2046,7 @@ func (mr *MockNamespaceIndexMockRecorder) BlockStartForWriteTime(writeTime inter } // BlockForBlockStart mocks base method -func (m *MockNamespaceIndex) BlockForBlockStart(blockStart time.Time) (index.Block, error) { +func (m *MockNamespaceIndex) BlockForBlockStart(blockStart time0.Time) (index.Block, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BlockForBlockStart", blockStart) ret0, _ := ret[0].(index.Block) @@ -2171,7 +2133,7 @@ func (mr *MockNamespaceIndexMockRecorder) BootstrapsDone() *gomock.Call { } // CleanupExpiredFileSets mocks base method -func (m *MockNamespaceIndex) CleanupExpiredFileSets(t time.Time) error { +func (m *MockNamespaceIndex) CleanupExpiredFileSets(t time0.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CleanupExpiredFileSets", t) ret0, _ := ret[0].(error) @@ -2199,7 +2161,7 @@ func (mr *MockNamespaceIndexMockRecorder) CleanupDuplicateFileSets() *gomock.Cal } // Tick mocks base method -func (m *MockNamespaceIndex) Tick(c context.Cancellable, startTime time.Time) (namespaceIndexTickResult, error) { +func (m *MockNamespaceIndex) Tick(c context.Cancellable, startTime time0.Time) (namespaceIndexTickResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Tick", c, startTime) ret0, _ := ret[0].(namespaceIndexTickResult) @@ -2322,10 +2284,10 @@ func (mr *MocknamespaceIndexInsertQueueMockRecorder) Stop() *gomock.Call { } // InsertBatch mocks base method -func (m *MocknamespaceIndexInsertQueue) InsertBatch(batch *index.WriteBatch) (*sync.WaitGroup, error) { +func (m *MocknamespaceIndexInsertQueue) InsertBatch(batch *index.WriteBatch) (*sync0.WaitGroup, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "InsertBatch", batch) - ret0, _ := ret[0].(*sync.WaitGroup) + ret0, _ := ret[0].(*sync0.WaitGroup) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -2374,10 +2336,10 @@ func (mr *MockdatabaseBootstrapManagerMockRecorder) IsBootstrapped() *gomock.Cal } // LastBootstrapCompletionTime mocks base method -func (m *MockdatabaseBootstrapManager) LastBootstrapCompletionTime() (time.Time, bool) { +func (m *MockdatabaseBootstrapManager) LastBootstrapCompletionTime() (time0.Time, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LastBootstrapCompletionTime") - ret0, _ := ret[0].(time.Time) + ret0, _ := ret[0].(time0.Time) ret1, _ := ret[1].(bool) return ret0, ret1 } @@ -2439,7 +2401,7 @@ func (m *MockdatabaseFlushManager) EXPECT() *MockdatabaseFlushManagerMockRecorde } // Flush mocks base method -func (m *MockdatabaseFlushManager) Flush(startTime time.Time) error { +func (m *MockdatabaseFlushManager) Flush(startTime time0.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Flush", startTime) ret0, _ := ret[0].(error) @@ -2453,10 +2415,10 @@ func (mr *MockdatabaseFlushManagerMockRecorder) Flush(startTime interface{}) *go } // LastSuccessfulSnapshotStartTime mocks base method -func (m *MockdatabaseFlushManager) LastSuccessfulSnapshotStartTime() (time.Time, bool) { +func (m *MockdatabaseFlushManager) LastSuccessfulSnapshotStartTime() (time0.Time, 
bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LastSuccessfulSnapshotStartTime") - ret0, _ := ret[0].(time.Time) + ret0, _ := ret[0].(time0.Time) ret1, _ := ret[1].(bool) return ret0, ret1 } @@ -2503,7 +2465,7 @@ func (m *MockdatabaseCleanupManager) EXPECT() *MockdatabaseCleanupManagerMockRec } // Cleanup mocks base method -func (m *MockdatabaseCleanupManager) Cleanup(t time.Time, isBootstrapped bool) error { +func (m *MockdatabaseCleanupManager) Cleanup(t time0.Time, isBootstrapped bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Cleanup", t, isBootstrapped) ret0, _ := ret[0].(error) @@ -2552,7 +2514,7 @@ func (m *MockdatabaseFileSystemManager) EXPECT() *MockdatabaseFileSystemManagerM } // Cleanup mocks base method -func (m *MockdatabaseFileSystemManager) Cleanup(t time.Time, isBootstrapped bool) error { +func (m *MockdatabaseFileSystemManager) Cleanup(t time0.Time, isBootstrapped bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Cleanup", t, isBootstrapped) ret0, _ := ret[0].(error) @@ -2566,7 +2528,7 @@ func (mr *MockdatabaseFileSystemManagerMockRecorder) Cleanup(t, isBootstrapped i } // Flush mocks base method -func (m *MockdatabaseFileSystemManager) Flush(t time.Time) error { +func (m *MockdatabaseFileSystemManager) Flush(t time0.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Flush", t) ret0, _ := ret[0].(error) @@ -2622,7 +2584,7 @@ func (mr *MockdatabaseFileSystemManagerMockRecorder) Status() *gomock.Call { } // Run mocks base method -func (m *MockdatabaseFileSystemManager) Run(t time.Time, runType runType, forceType forceType) bool { +func (m *MockdatabaseFileSystemManager) Run(t time0.Time, runType runType, forceType forceType) bool { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Run", t, runType, forceType) ret0, _ := ret[0].(bool) @@ -2648,10 +2610,10 @@ func (mr *MockdatabaseFileSystemManagerMockRecorder) Report() *gomock.Call { } // LastSuccessfulSnapshotStartTime mocks base method -func (m *MockdatabaseFileSystemManager) LastSuccessfulSnapshotStartTime() (time.Time, bool) { +func (m *MockdatabaseFileSystemManager) LastSuccessfulSnapshotStartTime() (time0.Time, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LastSuccessfulSnapshotStartTime") - ret0, _ := ret[0].(time.Time) + ret0, _ := ret[0].(time0.Time) ret1, _ := ret[1].(bool) return ret0, ret1 } @@ -2700,7 +2662,7 @@ func (mr *MockdatabaseShardRepairerMockRecorder) Options() *gomock.Call { } // Repair mocks base method -func (m *MockdatabaseShardRepairer) Repair(ctx context.Context, nsCtx namespace.Context, nsMeta namespace.Metadata, tr time0.Range, shard databaseShard) (repair.MetadataComparisonResult, error) { +func (m *MockdatabaseShardRepairer) Repair(ctx context.Context, nsCtx namespace.Context, nsMeta namespace.Metadata, tr time.Range, shard databaseShard) (repair.MetadataComparisonResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Repair", ctx, nsCtx, nsMeta, tr, shard) ret0, _ := ret[0].(repair.MetadataComparisonResult) @@ -2811,7 +2773,7 @@ func (m *MockdatabaseTickManager) EXPECT() *MockdatabaseTickManagerMockRecorder } // Tick mocks base method -func (m *MockdatabaseTickManager) Tick(forceType forceType, startTime time.Time) error { +func (m *MockdatabaseTickManager) Tick(forceType forceType, startTime time0.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Tick", forceType, startTime) ret0, _ := ret[0].(error) @@ -2876,10 +2838,10 @@ func (mr *MockdatabaseMediatorMockRecorder) IsBootstrapped() *gomock.Call { } // LastBootstrapCompletionTime mocks base method -func (m 
*MockdatabaseMediator) LastBootstrapCompletionTime() (time.Time, bool) { +func (m *MockdatabaseMediator) LastBootstrapCompletionTime() (time0.Time, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LastBootstrapCompletionTime") - ret0, _ := ret[0].(time.Time) + ret0, _ := ret[0].(time0.Time) ret1, _ := ret[1].(bool) return ret0, ret1 } @@ -2930,7 +2892,7 @@ func (mr *MockdatabaseMediatorMockRecorder) EnableFileOps() *gomock.Call { } // Tick mocks base method -func (m *MockdatabaseMediator) Tick(forceType forceType, startTime time.Time) error { +func (m *MockdatabaseMediator) Tick(forceType forceType, startTime time0.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Tick", forceType, startTime) ret0, _ := ret[0].(error) @@ -2996,10 +2958,10 @@ func (mr *MockdatabaseMediatorMockRecorder) Report() *gomock.Call { } // LastSuccessfulSnapshotStartTime mocks base method -func (m *MockdatabaseMediator) LastSuccessfulSnapshotStartTime() (time.Time, bool) { +func (m *MockdatabaseMediator) LastSuccessfulSnapshotStartTime() (time0.Time, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LastSuccessfulSnapshotStartTime") - ret0, _ := ret[0].(time.Time) + ret0, _ := ret[0].(time0.Time) ret1, _ := ret[1].(bool) return ret0, ret1 } @@ -3319,7 +3281,7 @@ func (mr *MockOptionsMockRecorder) RuntimeOptionsManager() *gomock.Call { } // SetErrorWindowForLoad mocks base method -func (m *MockOptions) SetErrorWindowForLoad(value time.Duration) Options { +func (m *MockOptions) SetErrorWindowForLoad(value time0.Duration) Options { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetErrorWindowForLoad", value) ret0, _ := ret[0].(Options) @@ -3333,10 +3295,10 @@ func (mr *MockOptionsMockRecorder) SetErrorWindowForLoad(value interface{}) *gom } // ErrorWindowForLoad mocks base method -func (m *MockOptions) ErrorWindowForLoad() time.Duration { +func (m *MockOptions) ErrorWindowForLoad() time0.Duration { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ErrorWindowForLoad") - ret0, _ := ret[0].(time.Duration) + ret0, _ := ret[0].(time0.Duration) return ret0 } @@ -3935,7 +3897,7 @@ func (mr *MockOptionsMockRecorder) FetchBlocksMetadataResultsPool() *gomock.Call } // SetQueryIDsWorkerPool mocks base method -func (m *MockOptions) SetQueryIDsWorkerPool(value sync0.WorkerPool) Options { +func (m *MockOptions) SetQueryIDsWorkerPool(value sync.WorkerPool) Options { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetQueryIDsWorkerPool", value) ret0, _ := ret[0].(Options) @@ -3949,10 +3911,10 @@ func (mr *MockOptionsMockRecorder) SetQueryIDsWorkerPool(value interface{}) *gom } // QueryIDsWorkerPool mocks base method -func (m *MockOptions) QueryIDsWorkerPool() sync0.WorkerPool { +func (m *MockOptions) QueryIDsWorkerPool() sync.WorkerPool { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "QueryIDsWorkerPool") - ret0, _ := ret[0].(sync0.WorkerPool) + ret0, _ := ret[0].(sync.WorkerPool) return ret0 } diff --git a/src/dbnode/storage/types.go b/src/dbnode/storage/types.go index a3c5c22c3e..f73b1d60ba 100644 --- a/src/dbnode/storage/types.go +++ b/src/dbnode/storage/types.go @@ -557,11 +557,6 @@ type databaseShard interface { repairer databaseShardRepairer, ) (repair.MetadataComparisonResult, error) - // TagsFromSeriesID returns the series tags from a series ID. - // TODO(r): Seems like this is a work around that shouldn't be - // necessary given the callsites that current exist? 
- TagsFromSeriesID(seriesID ident.ID) (ident.Tags, bool, error) - // SeriesReadWriteRef returns a read/write ref to a series, callers // must make sure to call the release callback once finished // with the reference. diff --git a/src/dbnode/ts/types.go b/src/dbnode/ts/types.go index 6799a5c23e..a1eff122c0 100644 --- a/src/dbnode/ts/types.go +++ b/src/dbnode/ts/types.go @@ -81,9 +81,6 @@ type Series struct { // ID is the series identifier. ID ident.ID - // Tags are the series tags. - Tags ident.Tags - // EncodedTags are the series encoded tags, if set then call sites can // avoid needing to encoded the tags from the series tags provided. EncodedTags EncodedTags diff --git a/src/dbnode/x/xio/io_mock.go b/src/dbnode/x/xio/io_mock.go index daa99ba481..6005d38e3b 100644 --- a/src/dbnode/x/xio/io_mock.go +++ b/src/dbnode/x/xio/io_mock.go @@ -1,26 +1,6 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/m3db/m3/src/dbnode/x/xio (interfaces: SegmentReader,SegmentReaderPool) -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Package xio is a generated GoMock package. package xio diff --git a/src/m3ninx/doc/document.go b/src/m3ninx/doc/document.go index bee8e1dcc5..164c0f210e 100644 --- a/src/m3ninx/doc/document.go +++ b/src/m3ninx/doc/document.go @@ -156,10 +156,15 @@ func (d Document) Validate() error { return ErrEmptyDocument } + if !utf8.Valid(d.ID) { + return fmt.Errorf("document has invalid ID: id=%v, id_hex=%x", d.ID, d.ID) + } + for _, f := range d.Fields { // TODO: Should we enforce uniqueness of field names? 
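// Editor's sketch, not part of the patch: Validate now rejects a document whose
// ID is not valid UTF-8, in addition to the existing field-name and field-value
// checks, and the errors carry a hex dump of the offending bytes. A small
// illustration (fmt import assumed; error text abbreviated):

d := doc.Document{
	ID: []byte{0xff, 0xfe}, // not valid UTF-8
	Fields: doc.Fields{
		{Name: []byte("city"), Value: []byte("oakland")},
	},
}
if err := d.Validate(); err != nil {
	fmt.Println(err) // e.g. "document has invalid ID: id=..., id_hex=fffe"
}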
if !utf8.Valid(f.Name) { - return fmt.Errorf("document contains invalid field name: %v", f.Name) + return fmt.Errorf("document has invalid field name: name=%v, name_hex=%x", + f.Name, f.Name) } if bytes.Equal(f.Name, IDReservedFieldName) { @@ -167,7 +172,8 @@ func (d Document) Validate() error { } if !utf8.Valid(f.Value) { - return fmt.Errorf("document contains invalid field value: %v", f.Value) + return fmt.Errorf("document has invalid field value: value=%v, value_hex=%x", + f.Value, f.Value) } } diff --git a/src/x/ident/identifier_pool.go b/src/x/ident/identifier_pool.go index 211c00392b..8239d818f4 100644 --- a/src/x/ident/identifier_pool.go +++ b/src/x/ident/identifier_pool.go @@ -81,7 +81,7 @@ func NewPool( }) p.tagArrayPool.Init() p.itersPool.Init(func() interface{} { - return newTagSliceIter(Tags{}, p) + return newTagSliceIter(Tags{}, nil, p) }) return p diff --git a/src/x/ident/tag_iterator.go b/src/x/ident/tag_iterator.go index 88501ca701..760c75b377 100644 --- a/src/x/ident/tag_iterator.go +++ b/src/x/ident/tag_iterator.go @@ -22,6 +22,8 @@ package ident import ( "errors" + + "github.com/m3db/m3/src/m3ninx/doc" ) var ( @@ -52,29 +54,69 @@ func NewTagStringsIterator(inputs ...string) (TagIterator, error) { // NewTagsIterator returns a TagsIterator over a set of tags. func NewTagsIterator(tags Tags) TagsIterator { - return newTagSliceIter(tags, nil) + return newTagSliceIter(tags, nil, nil) +} + +// NewFieldsTagsIterator returns a TagsIterator over a set of fields. +func NewFieldsTagsIterator(fields []doc.Field) TagsIterator { + return newTagSliceIter(Tags{}, fields, nil) } func newTagSliceIter( tags Tags, + fields []doc.Field, pool Pool, ) *tagSliceIter { - iter := &tagSliceIter{pool: pool} - iter.Reset(tags) + iter := &tagSliceIter{ + nameBytesID: NewReuseableBytesID(), + valueBytesID: NewReuseableBytesID(), + pool: pool, + } + iter.currentReuseableTag = Tag{ + Name: iter.nameBytesID, + Value: iter.valueBytesID, + } + if len(tags.Values()) > 0 { + iter.Reset(tags) + } else { + iter.ResetFields(fields) + } return iter } +type tagsSliceType uint + +const ( + tagSliceType tagsSliceType = iota + fieldSliceType +) + +type tagsSlice struct { + tags []Tag + fields []doc.Field +} + type tagSliceIter struct { - backingSlice []Tag - currentIdx int - currentTag Tag - pool Pool + backingSlice tagsSlice + currentIdx int + currentTag Tag + currentReuseableTag Tag + nameBytesID *ReuseableBytesID + valueBytesID *ReuseableBytesID + pool Pool } func (i *tagSliceIter) Next() bool { i.currentIdx++ - if i.currentIdx < len(i.backingSlice) { - i.currentTag = i.backingSlice[i.currentIdx] + l, t := i.lengthAndType() + if i.currentIdx < l { + if t == tagSliceType { + i.currentTag = i.backingSlice.tags[i.currentIdx] + } else { + i.nameBytesID.Reset(i.backingSlice.fields[i.currentIdx].Name) + i.valueBytesID.Reset(i.backingSlice.fields[i.currentIdx].Value) + i.currentTag = i.currentReuseableTag + } return true } i.currentTag = Tag{} @@ -97,7 +139,7 @@ func (i *tagSliceIter) Err() error { } func (i *tagSliceIter) Close() { - i.backingSlice = nil + i.backingSlice = tagsSlice{} i.currentIdx = 0 i.currentTag = Tag{} @@ -109,11 +151,19 @@ func (i *tagSliceIter) Close() { } func (i *tagSliceIter) Len() int { - return len(i.backingSlice) + l, _ := i.lengthAndType() + return l +} + +func (i *tagSliceIter) lengthAndType() (int, tagsSliceType) { + if l := len(i.backingSlice.tags); l > 0 { + return l, tagSliceType + } + return len(i.backingSlice.fields), fieldSliceType } func (i *tagSliceIter) Remaining() int { - if r 
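// Editor's sketch, not part of the patch: with the fields-backed mode added
// above, a []doc.Field (the index representation of a series) can be walked as
// tags directly via ident.NewFieldsTagsIterator / TagsIterator.ResetFields,
// without first materializing ident.Tags. Because the current tag is backed by
// the two reusable byte IDs above, callers should copy any Name/Value bytes that
// must outlive the next call to Next; that caveat is an inference from the
// reuseable-tag plumbing, not something the patch states. fmt import assumed.

fields := []doc.Field{
	{Name: []byte("city"), Value: []byte("oakland")},
	{Name: []byte("state"), Value: []byte("ca")},
}
iter := ident.NewFieldsTagsIterator(fields)
defer iter.Close()
for iter.Next() {
	tag := iter.Current()
	fmt.Printf("%s=%s\n", tag.Name.Bytes(), tag.Value.Bytes())
}
if err := iter.Err(); err != nil {
	fmt.Println(err)
}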
:= len(i.backingSlice) - 1 - i.currentIdx; r >= 0 { + if r := i.Len() - 1 - i.currentIdx; r >= 0 { return r } return 0 @@ -122,7 +172,12 @@ func (i *tagSliceIter) Remaining() int { func (i *tagSliceIter) Duplicate() TagIterator { if i.pool != nil { iter := i.pool.TagsIterator() - iter.Reset(Tags{values: i.backingSlice}) + if len(i.backingSlice.tags) > 0 { + iter.Reset(Tags{values: i.backingSlice.tags}) + } else { + iter.ResetFields(i.backingSlice.fields) + } + for j := 0; j <= i.currentIdx; j++ { iter.Next() } @@ -141,7 +196,12 @@ func (i *tagSliceIter) rewind() { } func (i *tagSliceIter) Reset(tags Tags) { - i.backingSlice = tags.Values() + i.backingSlice = tagsSlice{tags: tags.Values()} + i.rewind() +} + +func (i *tagSliceIter) ResetFields(fields []doc.Field) { + i.backingSlice = tagsSlice{fields: fields} i.rewind() } diff --git a/src/x/ident/types.go b/src/x/ident/types.go index e6b8d49b40..0334cc546c 100644 --- a/src/x/ident/types.go +++ b/src/x/ident/types.go @@ -24,6 +24,7 @@ package ident import ( "fmt" + "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/x/checked" "github.com/m3db/m3/src/x/context" ) @@ -230,6 +231,9 @@ type TagsIterator interface { // Reset allows the tag iterator to be reused with a new set of tags. Reset(tags Tags) + + // ResetFields allows tag iterator to be reused from a set of fields. + ResetFields(fields []doc.Field) } // Tags is a collection of Tag instances that can be pooled. From bcbab921e4596d15d70da77bb56dba11ff6f310f Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Sun, 21 Jun 2020 01:46:44 -0400 Subject: [PATCH 02/37] Flow index inserts through CPU local insert queue in batch, avoid shard insert queue --- src/dbnode/generated/mocks/generate.go | 2 +- .../server/tchannelthrift/node/service.go | 16 +- src/dbnode/persist/fs/commitlog/commit_log.go | 13 +- .../persist/fs/commitlog/commit_log_mock.go | 3 +- src/dbnode/persist/fs/commitlog/types.go | 3 +- src/dbnode/server/server.go | 3 +- src/dbnode/storage/database.go | 61 ++-- src/dbnode/storage/index.go | 18 ++ src/dbnode/storage/index_insert_queue.go | 156 ++++++++--- src/dbnode/storage/namespace.go | 28 +- src/dbnode/storage/options.go | 10 +- src/dbnode/storage/shard.go | 77 ++++-- src/dbnode/storage/storage_mock.go | 30 +- src/dbnode/storage/types.go | 45 ++- src/dbnode/ts/types.go | 87 ------ src/dbnode/ts/write_batch_mock.go | 261 ------------------ src/dbnode/ts/writes/types.go | 129 +++++++++ src/dbnode/ts/{ => writes}/write_batch.go | 45 ++- .../ts/{ => writes}/write_batch_pool.go | 2 +- .../ts/{ => writes}/write_batch_test.go | 2 +- src/x/sync/cpu.go | 11 + src/x/sync/cpu_linux_amd64.go | 4 + src/x/sync/cpu_linux_amd64.s | 15 + src/x/sync/map_cpus.go | 62 +++++ 24 files changed, 582 insertions(+), 501 deletions(-) delete mode 100644 src/dbnode/ts/write_batch_mock.go create mode 100644 src/dbnode/ts/writes/types.go rename src/dbnode/ts/{ => writes}/write_batch.go (84%) rename src/dbnode/ts/{ => writes}/write_batch_pool.go (99%) rename src/dbnode/ts/{ => writes}/write_batch_test.go (99%) create mode 100644 src/x/sync/cpu.go create mode 100644 src/x/sync/cpu_linux_amd64.go create mode 100644 src/x/sync/cpu_linux_amd64.s create mode 100644 src/x/sync/map_cpus.go diff --git a/src/dbnode/generated/mocks/generate.go b/src/dbnode/generated/mocks/generate.go index 7c8d6ac8da..0dc98bc96b 100644 --- a/src/dbnode/generated/mocks/generate.go +++ b/src/dbnode/generated/mocks/generate.go @@ -42,7 +42,7 @@ //go:generate sh -c "mockgen -package=namespace 
-destination=$GOPATH/src/$PACKAGE/src/dbnode/namespace/namespace_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/namespace/types.go" //go:generate sh -c "mockgen -package=kvadmin -destination=$GOPATH/src/$PACKAGE/src/dbnode/namespace/kvadmin/kvadmin_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/namespace/kvadmin/types.go" //go:generate sh -c "mockgen -package=runtime -destination=$GOPATH/src/$PACKAGE/src/dbnode/runtime/runtime_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/runtime/types.go" -//go:generate sh -c "mockgen -package=ts -destination=$GOPATH/src/$PACKAGE/src/dbnode/ts/write_batch_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/ts/types.go" +//go:generate sh -c "mockgen -package=writes -destination=$GOPATH/src/$PACKAGE/src/dbnode/ts/writes/write_batch_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/ts/writes/types.go" //go:generate sh -c "mockgen -package=index -destination=$GOPATH/src/$PACKAGE/src/dbnode/storage/index/index_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/storage/index/types.go" package mocks diff --git a/src/dbnode/network/server/tchannelthrift/node/service.go b/src/dbnode/network/server/tchannelthrift/node/service.go index f7e88d4230..ef98f4c69c 100644 --- a/src/dbnode/network/server/tchannelthrift/node/service.go +++ b/src/dbnode/network/server/tchannelthrift/node/service.go @@ -37,7 +37,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/block" "github.com/m3db/m3/src/dbnode/storage/index" "github.com/m3db/m3/src/dbnode/tracepoint" - "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/ts/writes" "github.com/m3db/m3/src/dbnode/x/xio" "github.com/m3db/m3/src/dbnode/x/xpool" "github.com/m3db/m3/src/x/checked" @@ -1414,7 +1414,7 @@ func (s *service) WriteBatchRaw(tctx thrift.Context, req *rpc.WriteBatchRawReque ) } - err = db.WriteBatch(ctx, nsID, batchWriter.(ts.WriteBatch), + err = db.WriteBatch(ctx, nsID, batchWriter.(writes.WriteBatch), pooledReq) if err != nil { return convert.ToRPCError(err) @@ -1474,7 +1474,7 @@ func (s *service) WriteBatchRawV2(tctx thrift.Context, req *rpc.WriteBatchRawV2R var ( nsID ident.ID nsIdx int64 - batchWriter ts.BatchWriter + batchWriter writes.BatchWriter retryableErrors int nonRetryableErrors int @@ -1482,7 +1482,7 @@ func (s *service) WriteBatchRawV2(tctx thrift.Context, req *rpc.WriteBatchRawV2R for i, elem := range req.Elements { if nsID == nil || elem.NameSpace != nsIdx { if batchWriter != nil { - err = db.WriteBatch(ctx, nsID, batchWriter.(ts.WriteBatch), pooledReq) + err = db.WriteBatch(ctx, nsID, batchWriter.(writes.WriteBatch), pooledReq) if err != nil { return convert.ToRPCError(err) } @@ -1529,7 +1529,7 @@ func (s *service) WriteBatchRawV2(tctx thrift.Context, req *rpc.WriteBatchRawV2R if batchWriter != nil { // Write the last batch. 
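// Editor's sketch, not part of the patch (which only re-types the batch writer
// from ts.WriteBatch to writes.WriteBatch): the V2 batch handlers above group
// consecutive request elements by namespace, flushing the in-flight batch
// whenever the namespace index changes and once more after the loop. The shape
// of that pattern in isolation; elements, newWriterFor, addToBatch and flush are
// hypothetical stand-ins for the request slice and the db/batch-writer calls.

var (
	curNs  int64
	writer writes.BatchWriter
)
for _, elem := range elements {
	if writer == nil || elem.NameSpace != curNs {
		if writer != nil {
			if err := flush(writer); err != nil { // e.g. db.WriteBatch(ctx, nsID, ...)
				return err
			}
		}
		writer = newWriterFor(elem.NameSpace)
		curNs = elem.NameSpace
	}
	addToBatch(writer, elem)
}
if writer != nil {
	if err := flush(writer); err != nil { // write the last batch
		return err
	}
}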
- err = db.WriteBatch(ctx, nsID, batchWriter.(ts.WriteBatch), pooledReq) + err = db.WriteBatch(ctx, nsID, batchWriter.(writes.WriteBatch), pooledReq) if err != nil { return convert.ToRPCError(err) } @@ -1684,7 +1684,7 @@ func (s *service) WriteTaggedBatchRawV2(tctx thrift.Context, req *rpc.WriteTagge var ( nsID ident.ID nsIdx int64 - batchWriter ts.BatchWriter + batchWriter writes.BatchWriter retryableErrors int nonRetryableErrors int @@ -1692,7 +1692,7 @@ func (s *service) WriteTaggedBatchRawV2(tctx thrift.Context, req *rpc.WriteTagge for i, elem := range req.Elements { if nsID == nil || elem.NameSpace != nsIdx { if batchWriter != nil { - err = db.WriteTaggedBatch(ctx, nsID, batchWriter.(ts.WriteBatch), pooledReq) + err = db.WriteTaggedBatch(ctx, nsID, batchWriter.(writes.WriteBatch), pooledReq) if err != nil { return convert.ToRPCError(err) } @@ -1749,7 +1749,7 @@ func (s *service) WriteTaggedBatchRawV2(tctx thrift.Context, req *rpc.WriteTagge if batchWriter != nil { // Write the last batch. - err = db.WriteTaggedBatch(ctx, nsID, batchWriter.(ts.WriteBatch), pooledReq) + err = db.WriteTaggedBatch(ctx, nsID, batchWriter.(writes.WriteBatch), pooledReq) if err != nil { return convert.ToRPCError(err) } diff --git a/src/dbnode/persist/fs/commitlog/commit_log.go b/src/dbnode/persist/fs/commitlog/commit_log.go index 3df38972ce..40ab3b8298 100644 --- a/src/dbnode/persist/fs/commitlog/commit_log.go +++ b/src/dbnode/persist/fs/commitlog/commit_log.go @@ -31,6 +31,7 @@ import ( "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/persist/fs" "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/ts/writes" "github.com/m3db/m3/src/x/context" xerrors "github.com/m3db/m3/src/x/errors" xtime "github.com/m3db/m3/src/x/time" @@ -65,8 +66,8 @@ type commitLogFailFn func(err error) // we can handle both cases without having to allocate as slice of size // 1 to handle a single write. type writeOrWriteBatch struct { - write ts.Write - writeBatch ts.WriteBatch + write writes.Write + writeBatch writes.WriteBatch } type commitLog struct { @@ -449,8 +450,8 @@ func (l *commitLog) write() { // We use these to make the batch and non-batched write paths the same // by turning non-batched writes into a batch of size one while avoiding // any allocations. 
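// Editor's sketch, not part of the patch: the comment above describes reusing a
// preallocated one-element slice so that single writes and batched writes share
// the same code path without allocating a slice per write. The pattern in
// isolation, with writes.BatchWrite as the element type per the hunk below; it
// relies on a single goroutine draining the writes, as the commit log's write
// loop does.

var singleBatch = make([]writes.BatchWrite, 1)

func asBatch(w writes.BatchWrite, batch []writes.BatchWrite) []writes.BatchWrite {
	if batch != nil {
		return batch // already a batch
	}
	// Reuse the long-lived one-element slice rather than allocating []writes.BatchWrite{w}.
	singleBatch[0] = w
	return singleBatch
}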
- var singleBatch = make([]ts.BatchWrite, 1) - var batch []ts.BatchWrite + var singleBatch = make([]writes.BatchWrite, 1) + var batch []writes.BatchWrite for write := range l.writes { if write.eventType == flushEventType { @@ -720,7 +721,7 @@ func (l *commitLog) Write( annotation ts.Annotation, ) error { return l.writeFn(ctx, writeOrWriteBatch{ - write: ts.Write{ + write: writes.Write{ Series: series, Datapoint: datapoint, Unit: unit, @@ -731,7 +732,7 @@ func (l *commitLog) Write( func (l *commitLog) WriteBatch( ctx context.Context, - writes ts.WriteBatch, + writes writes.WriteBatch, ) error { return l.writeFn(ctx, writeOrWriteBatch{ writeBatch: writes, diff --git a/src/dbnode/persist/fs/commitlog/commit_log_mock.go b/src/dbnode/persist/fs/commitlog/commit_log_mock.go index 4eb0e1eb0a..fe0fa8c15c 100644 --- a/src/dbnode/persist/fs/commitlog/commit_log_mock.go +++ b/src/dbnode/persist/fs/commitlog/commit_log_mock.go @@ -32,6 +32,7 @@ import ( "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/persist/fs" "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/ts/writes" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" "github.com/m3db/m3/src/x/instrument" @@ -93,7 +94,7 @@ func (mr *MockCommitLogMockRecorder) Write(ctx, series, datapoint, unit, annotat } // WriteBatch mocks base method -func (m *MockCommitLog) WriteBatch(ctx context.Context, writes ts.WriteBatch) error { +func (m *MockCommitLog) WriteBatch(ctx context.Context, writes writes.WriteBatch) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteBatch", ctx, writes) ret0, _ := ret[0].(error) diff --git a/src/dbnode/persist/fs/commitlog/types.go b/src/dbnode/persist/fs/commitlog/types.go index cd13dae10b..05ff965db9 100644 --- a/src/dbnode/persist/fs/commitlog/types.go +++ b/src/dbnode/persist/fs/commitlog/types.go @@ -27,6 +27,7 @@ import ( "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/persist/fs" "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/ts/writes" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" "github.com/m3db/m3/src/x/instrument" @@ -66,7 +67,7 @@ type CommitLog interface { // WriteBatch is the same as Write, but in batch. WriteBatch( ctx context.Context, - writes ts.WriteBatch, + writes writes.WriteBatch, ) error // Close the commit log diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go index 7831e6679c..44dc8a5bcb 100644 --- a/src/dbnode/server/server.go +++ b/src/dbnode/server/server.go @@ -69,6 +69,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/stats" "github.com/m3db/m3/src/dbnode/topology" "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/ts/writes" xtchannel "github.com/m3db/m3/src/dbnode/x/tchannel" "github.com/m3db/m3/src/dbnode/x/xio" "github.com/m3db/m3/src/dbnode/x/xpool" @@ -1402,7 +1403,7 @@ func withEncodingAndPoolingOptions( InstrumentOptions(). 
SetMetricsScope(scope.SubScope("write-batch-pool"))) - writeBatchPool := ts.NewWriteBatchPool( + writeBatchPool := writes.NewWriteBatchPool( writeBatchPoolOpts, writeBatchPoolInitialBatchSize, writeBatchPoolMaxBatchSize) diff --git a/src/dbnode/storage/database.go b/src/dbnode/storage/database.go index 972b810e85..da90615829 100644 --- a/src/dbnode/storage/database.go +++ b/src/dbnode/storage/database.go @@ -37,6 +37,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/index" "github.com/m3db/m3/src/dbnode/tracepoint" "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/ts/writes" "github.com/m3db/m3/src/dbnode/x/xio" "github.com/m3db/m3/src/x/context" xerrors "github.com/m3db/m3/src/x/errors" @@ -111,7 +112,7 @@ type db struct { metrics databaseMetrics log *zap.Logger - writeBatchPool *ts.WriteBatchPool + writeBatchPool *writes.WriteBatchPool } type databaseMetrics struct { @@ -584,12 +585,12 @@ func (d *db) Write( return err } - series, wasWritten, err := n.Write(ctx, id, timestamp, value, unit, annotation) + seriesWrite, err := n.Write(ctx, id, timestamp, value, unit, annotation) if err != nil { return err } - if !n.Options().WritesToCommitLog() || !wasWritten { + if !n.Options().WritesToCommitLog() || !seriesWrite.WasWritten { return nil } @@ -599,7 +600,7 @@ func (d *db) Write( Value: value, } - return d.commitLog.Write(ctx, series, dp, unit, annotation) + return d.commitLog.Write(ctx, seriesWrite.Series, dp, unit, annotation) } func (d *db) WriteTagged( @@ -618,12 +619,12 @@ func (d *db) WriteTagged( return err } - series, wasWritten, err := n.WriteTagged(ctx, id, tags, timestamp, value, unit, annotation) + seriesWrite, err := n.WriteTagged(ctx, id, tags, timestamp, value, unit, annotation) if err != nil { return err } - if !n.Options().WritesToCommitLog() || !wasWritten { + if !n.Options().WritesToCommitLog() || !seriesWrite.WasWritten { return nil } @@ -633,10 +634,10 @@ func (d *db) WriteTagged( Value: value, } - return d.commitLog.Write(ctx, series, dp, unit, annotation) + return d.commitLog.Write(ctx, seriesWrite.Series, dp, unit, annotation) } -func (d *db) BatchWriter(namespace ident.ID, batchSize int) (ts.BatchWriter, error) { +func (d *db) BatchWriter(namespace ident.ID, batchSize int) (writes.BatchWriter, error) { n, err := d.namespaceFor(namespace) if err != nil { d.metrics.unknownNamespaceBatchWriter.Inc(1) @@ -654,7 +655,7 @@ func (d *db) BatchWriter(namespace ident.ID, batchSize int) (ts.BatchWriter, err func (d *db) WriteBatch( ctx context.Context, namespace ident.ID, - writer ts.BatchWriter, + writer writes.BatchWriter, errHandler IndexedErrorHandler, ) error { return d.writeBatch(ctx, namespace, writer, errHandler, false) @@ -663,7 +664,7 @@ func (d *db) WriteBatch( func (d *db) WriteTaggedBatch( ctx context.Context, namespace ident.ID, - writer ts.BatchWriter, + writer writes.BatchWriter, errHandler IndexedErrorHandler, ) error { return d.writeBatch(ctx, namespace, writer, errHandler, true) @@ -672,7 +673,7 @@ func (d *db) WriteTaggedBatch( func (d *db) writeBatch( ctx context.Context, namespace ident.ID, - writer ts.BatchWriter, + writer writes.BatchWriter, errHandler IndexedErrorHandler, tagged bool, ) error { @@ -685,7 +686,7 @@ func (d *db) writeBatch( } defer sp.Finish() - writes, ok := writer.(ts.WriteBatch) + writes, ok := writer.(writes.WriteBatch) if !ok { return errWriterDoesNotImplementWriteBatch } @@ -703,13 +704,12 @@ func (d *db) writeBatch( iter := writes.Iter() for i, write := range iter { var ( - series ts.Series - wasWritten bool - 
err error + seriesWrite SeriesWrite + err error ) if tagged { - series, wasWritten, err = n.WriteTagged( + seriesWrite, err = n.WriteTagged( ctx, write.Write.Series.ID, write.TagIter, @@ -719,7 +719,7 @@ func (d *db) writeBatch( write.Write.Annotation, ) } else { - series, wasWritten, err = n.Write( + seriesWrite, err = n.Write( ctx, write.Write.Series.ID, write.Write.Datapoint.Timestamp, @@ -732,6 +732,8 @@ func (d *db) writeBatch( // Return errors with the original index provided by the caller so they // can associate the error with the write that caused it. errHandler.HandleError(write.OriginalIndex, err) + writes.SetError(i, err) + continue } // Need to set the outcome in the success case so the commitlog gets the @@ -739,13 +741,34 @@ func (d *db) writeBatch( // whose lifecycle lives longer than the span of this request, making them // safe for use by the async commitlog. Need to set the outcome in the // error case so that the commitlog knows to skip this entry. - writes.SetOutcome(i, series, err) - if !wasWritten || err != nil { + writes.SetSeries(i, seriesWrite.Series) + + if !seriesWrite.WasWritten { // This series has no additional information that needs to be written to // the commit log; set this series to skip writing to the commit log. writes.SetSkipWrite(i) } + + if seriesWrite.NeedsIndex { + writes.SetPendingIndex(i, seriesWrite.PendingIndexInsert) + } + } + + // Now insert all pending index inserts together in one go + // to limit lock contention. + if pending := writes.PendingIndex(); len(pending) > 0 { + err := n.WritePendingIndexInserts(pending) + if err != nil { + // Mark those as pending index with an error. + for i, write := range iter { + if write.PendingIndex { + errHandler.HandleError(write.OriginalIndex, err) + writes.SetError(i, err) + } + } + } } + if !n.Options().WritesToCommitLog() { // Finalize here because we can't rely on the commitlog to do it since // we're not using it. diff --git a/src/dbnode/storage/index.go b/src/dbnode/storage/index.go index 7b67b9a386..bced7113f4 100644 --- a/src/dbnode/storage/index.go +++ b/src/dbnode/storage/index.go @@ -45,6 +45,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/index/convert" "github.com/m3db/m3/src/dbnode/storage/series" "github.com/m3db/m3/src/dbnode/tracepoint" + "github.com/m3db/m3/src/dbnode/ts/writes" "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/m3ninx/idx" m3ninxindex "github.com/m3db/m3/src/m3ninx/index" @@ -568,6 +569,23 @@ func (i *nsIndex) WriteBatch( return nil } +func (i *nsIndex) WritePending( + pending []writes.PendingIndexInsert, +) error { + i.state.RLock() + if !i.isOpenWithRLock() { + i.state.RUnlock() + i.metrics.insertAfterClose.Inc(1) + err := errDbIndexUnableToWriteClosed + return err + } + _, err := i.state.insertQueue.InsertPending(pending) + // release the lock because we don't need it past this point. + i.state.RUnlock() + + return err +} + // WriteBatches is called by the indexInsertQueue. 
func (i *nsIndex) writeBatches( batch *index.WriteBatch, diff --git a/src/dbnode/storage/index_insert_queue.go b/src/dbnode/storage/index_insert_queue.go index e127c15c10..1b2190767c 100644 --- a/src/dbnode/storage/index_insert_queue.go +++ b/src/dbnode/storage/index_insert_queue.go @@ -22,12 +22,15 @@ package storage import ( "errors" + "runtime" "sync" "time" "github.com/m3db/m3/src/dbnode/clock" "github.com/m3db/m3/src/dbnode/namespace" "github.com/m3db/m3/src/dbnode/storage/index" + "github.com/m3db/m3/src/dbnode/ts/writes" + xsync "github.com/m3db/m3/src/x/sync" "github.com/uber-go/tally" ) @@ -45,7 +48,7 @@ const ( nsIndexInsertQueueStateClosed // TODO(prateek): runtime options for this stuff - defaultIndexBatchBackoff = time.Millisecond + defaultIndexBatchBackoff = 2 * time.Millisecond indexResetAllInsertsEvery = 30 * time.Second ) @@ -115,7 +118,7 @@ func (q *nsIndexInsertQueue) insertLoop() { }() var lastInsert time.Time - freeBatch := q.newBatch() + batch := q.newBatch() for range q.notifyInsert { // Check if inserting too fast elapsedSinceLastInsert := q.nowFn().Sub(lastInsert) @@ -124,59 +127,74 @@ func (q *nsIndexInsertQueue) insertLoop() { var ( state nsIndexInsertQueueState backoff time.Duration - batch *nsIndexInsertBatch ) q.Lock() state = q.state if elapsedSinceLastInsert < q.indexBatchBackoff { // Need to backoff before rotate and insert backoff = q.indexBatchBackoff - elapsedSinceLastInsert - } else { - // No backoff required, rotate and go - batch = q.currBatch - q.currBatch = freeBatch } q.Unlock() + if state != nsIndexInsertQueueStateOpen { + return // Break if the queue closed + } if backoff > 0 { q.sleepFn(backoff) - q.Lock() - // Rotate after backoff - batch = q.currBatch - q.currBatch = freeBatch - q.Unlock() } - if len(batch.shardInserts) > 0 { - all := batch.AllInserts() + // Rotate after backoff + batchWg := q.currBatch.Rotate(batch) + + all := batch.AllInserts() + if all.Len() > 0 { q.indexBatchFn(all) } - batch.wg.Done() - // Set the free batch - batch.Reset() - freeBatch = batch + batchWg.Done() lastInsert = q.nowFn() - - if state != nsIndexInsertQueueStateOpen { - return // Break if the queue closed - } } } +func (q *nsIndexInsertQueue) rotate(target *nsIndexInsertBatch) { + +} + func (q *nsIndexInsertQueue) InsertBatch( batch *index.WriteBatch, ) (*sync.WaitGroup, error) { - q.Lock() - if q.state != nsIndexInsertQueueStateOpen { - q.Unlock() - return nil, errIndexInsertQueueNotOpen - } batchLen := batch.Len() - q.currBatch.shardInserts = append(q.currBatch.shardInserts, batch) - wg := q.currBatch.wg - q.Unlock() + + // Choose the queue relevant to current CPU index + inserts := q.currBatch.insertsByCPU[xsync.IndexCPU()] + inserts.Lock() + inserts.shardInserts = append(inserts.shardInserts, batch) + wg := inserts.wg + inserts.Unlock() + + // Notify insert loop + select { + case q.notifyInsert <- struct{}{}: + default: + // Loop busy, already ready to consume notification + } + + q.metrics.numPending.Inc(int64(batchLen)) + return wg, nil +} + +func (q *nsIndexInsertQueue) InsertPending( + pending []writes.PendingIndexInsert, +) (*sync.WaitGroup, error) { + batchLen := len(pending) + + // Choose the queue relevant to current CPU index + inserts := q.currBatch.insertsByCPU[xsync.IndexCPU()] + inserts.Lock() + inserts.batchInserts = append(inserts.batchInserts, pending...) 
+ wg := inserts.wg + inserts.Unlock() // Notify insert loop select { @@ -232,11 +250,18 @@ type nsIndexInsertBatch struct { namespace namespace.Metadata nowFn clock.NowFn wg *sync.WaitGroup - shardInserts []*index.WriteBatch + insertsByCPU []*insertsByCPU allInserts *index.WriteBatch allInsertsLastReset time.Time } +type insertsByCPU struct { + sync.Mutex + shardInserts []*index.WriteBatch + batchInserts []writes.PendingIndexInsert + wg *sync.WaitGroup +} + func newNsIndexInsertBatch( namespace namespace.Metadata, nowFn clock.NowFn, @@ -245,8 +270,12 @@ func newNsIndexInsertBatch( namespace: namespace, nowFn: nowFn, } + numCPU := runtime.NumCPU() + for i := 0; i < numCPU; i++ { + b.insertsByCPU = append(b.insertsByCPU, &insertsByCPU{}) + } b.allocateAllInserts() - b.Reset() + b.Rotate(nil) return b } @@ -259,25 +288,72 @@ func (b *nsIndexInsertBatch) allocateAllInserts() { func (b *nsIndexInsertBatch) AllInserts() *index.WriteBatch { b.allInserts.Reset() - for _, shardInserts := range b.shardInserts { - b.allInserts.AppendAll(shardInserts) + for _, inserts := range b.insertsByCPU { + inserts.Lock() + for _, shardInserts := range inserts.shardInserts { + b.allInserts.AppendAll(shardInserts) + } + for _, insert := range inserts.batchInserts { + b.allInserts.Append(insert.Entry, insert.Document) + } + inserts.Unlock() } return b.allInserts } -func (b *nsIndexInsertBatch) Reset() { +func (b *nsIndexInsertBatch) Rotate(target *nsIndexInsertBatch) *sync.WaitGroup { + prevWg := b.wg + + // We always expect to be waiting for an index. b.wg = &sync.WaitGroup{} - // We always expect to be waiting for an index b.wg.Add(1) - for i := range b.shardInserts { - // TODO(prateek): if we start pooling `[]index.WriteBatchEntry`, then we could return to the pool here. - b.shardInserts[i] = nil + + // Rotate to target if we need to. + if target != nil { + for idx, inserts := range b.insertsByCPU { + targetInserts := target.insertsByCPU[idx] + targetInserts.Lock() + + // Reset the target inserts since we'll take ref to them in a second. + for i := range targetInserts.shardInserts { + // TODO(prateek): if we start pooling `[]index.WriteBatchEntry`, then we could return to the pool here. + targetInserts.shardInserts[i] = nil + } + prevTargetInserts := targetInserts.shardInserts[:0] + + // memset optimization + var zero writes.PendingIndexInsert + for i := range targetInserts.batchInserts { + targetInserts.batchInserts[i] = zero + } + prevTargetBatchInserts := targetInserts.batchInserts[:0] + + inserts.Lock() + + // Copy current slices to target. + targetInserts.shardInserts = inserts.shardInserts + targetInserts.batchInserts = inserts.batchInserts + targetInserts.wg = inserts.wg + + // Reuse the target's old slices. + inserts.shardInserts = prevTargetInserts + inserts.batchInserts = prevTargetBatchInserts + + // Use new wait group. 
+ inserts.wg = b.wg + + inserts.Unlock() + + targetInserts.Unlock() + } } - b.shardInserts = b.shardInserts[:0] + if b.nowFn().Sub(b.allInsertsLastReset) > indexResetAllInsertsEvery { // NB(r): Sometimes this can grow very high, so we reset it relatively frequently b.allocateAllInserts() } + + return prevWg } type nsIndexInsertQueueMetrics struct { diff --git a/src/dbnode/storage/namespace.go b/src/dbnode/storage/namespace.go index 61c6fa9ecb..b5d75c2033 100644 --- a/src/dbnode/storage/namespace.go +++ b/src/dbnode/storage/namespace.go @@ -40,6 +40,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/series" "github.com/m3db/m3/src/dbnode/tracepoint" "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/ts/writes" "github.com/m3db/m3/src/dbnode/x/xio" xclose "github.com/m3db/m3/src/x/close" "github.com/m3db/m3/src/x/context" @@ -663,21 +664,21 @@ func (n *dbNamespace) Write( value float64, unit xtime.Unit, annotation []byte, -) (ts.Series, bool, error) { +) (SeriesWrite, error) { callStart := n.nowFn() shard, nsCtx, err := n.shardFor(id) if err != nil { n.metrics.write.ReportError(n.nowFn().Sub(callStart)) - return ts.Series{}, false, err + return SeriesWrite{}, err } opts := series.WriteOptions{ TruncateType: n.opts.TruncateType(), SchemaDesc: nsCtx.Schema, } - series, wasWritten, err := shard.Write(ctx, id, timestamp, + seriesWrite, err := shard.Write(ctx, id, timestamp, value, unit, annotation, opts) n.metrics.write.ReportSuccessOrError(err, n.nowFn().Sub(callStart)) - return series, wasWritten, err + return seriesWrite, err } func (n *dbNamespace) WriteTagged( @@ -688,25 +689,34 @@ func (n *dbNamespace) WriteTagged( value float64, unit xtime.Unit, annotation []byte, -) (ts.Series, bool, error) { +) (SeriesWrite, error) { callStart := n.nowFn() if n.reverseIndex == nil { // only happens if indexing is enabled. n.metrics.writeTagged.ReportError(n.nowFn().Sub(callStart)) - return ts.Series{}, false, errNamespaceIndexingDisabled + return SeriesWrite{}, errNamespaceIndexingDisabled } shard, nsCtx, err := n.shardFor(id) if err != nil { n.metrics.writeTagged.ReportError(n.nowFn().Sub(callStart)) - return ts.Series{}, false, err + return SeriesWrite{}, err } opts := series.WriteOptions{ TruncateType: n.opts.TruncateType(), SchemaDesc: nsCtx.Schema, } - series, wasWritten, err := shard.WriteTagged(ctx, id, tags, timestamp, + seriesWrite, err := shard.WriteTagged(ctx, id, tags, timestamp, value, unit, annotation, opts) n.metrics.writeTagged.ReportSuccessOrError(err, n.nowFn().Sub(callStart)) - return series, wasWritten, err + return seriesWrite, err +} + +func (n *dbNamespace) WritePendingIndexInserts( + pending []writes.PendingIndexInsert, +) error { + if n.reverseIndex == nil { // only happens if indexing is enabled. 
+ return errNamespaceIndexingDisabled + } + return n.reverseIndex.WritePending(pending) } func (n *dbNamespace) SeriesReadWriteRef( diff --git a/src/dbnode/storage/options.go b/src/dbnode/storage/options.go index d53178470f..92200a2823 100644 --- a/src/dbnode/storage/options.go +++ b/src/dbnode/storage/options.go @@ -42,7 +42,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/index" "github.com/m3db/m3/src/dbnode/storage/repair" "github.com/m3db/m3/src/dbnode/storage/series" - "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/ts/writes" "github.com/m3db/m3/src/dbnode/x/xio" "github.com/m3db/m3/src/dbnode/x/xpool" "github.com/m3db/m3/src/x/context" @@ -150,7 +150,7 @@ type options struct { fetchBlockMetadataResultsPool block.FetchBlockMetadataResultsPool fetchBlocksMetadataResultsPool block.FetchBlocksMetadataResultsPool queryIDsWorkerPool xsync.WorkerPool - writeBatchPool *ts.WriteBatchPool + writeBatchPool *writes.WriteBatchPool bufferBucketPool *series.BufferBucketPool bufferBucketVersionsPool *series.BufferBucketVersionsPool retrieveRequestPool fs.RetrieveRequestPool @@ -179,7 +179,7 @@ func newOptions(poolOpts pool.ObjectPoolOptions) Options { queryIDsWorkerPool := xsync.NewWorkerPool(int(math.Ceil(float64(runtime.NumCPU()) / 2))) queryIDsWorkerPool.Init() - writeBatchPool := ts.NewWriteBatchPool(poolOpts, nil, nil) + writeBatchPool := writes.NewWriteBatchPool(poolOpts, nil, nil) writeBatchPool.Init() segmentReaderPool := xio.NewSegmentReaderPool(poolOpts) @@ -674,13 +674,13 @@ func (o *options) QueryIDsWorkerPool() xsync.WorkerPool { return o.queryIDsWorkerPool } -func (o *options) SetWriteBatchPool(value *ts.WriteBatchPool) Options { +func (o *options) SetWriteBatchPool(value *writes.WriteBatchPool) Options { opts := *o opts.writeBatchPool = value return &opts } -func (o *options) WriteBatchPool() *ts.WriteBatchPool { +func (o *options) WriteBatchPool() *writes.WriteBatchPool { return o.writeBatchPool } diff --git a/src/dbnode/storage/shard.go b/src/dbnode/storage/shard.go index 9e77b8f6bc..4a807e62b1 100644 --- a/src/dbnode/storage/shard.go +++ b/src/dbnode/storage/shard.go @@ -45,6 +45,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/series/lookup" "github.com/m3db/m3/src/dbnode/tracepoint" "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/ts/writes" "github.com/m3db/m3/src/dbnode/x/xio" "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/x/checked" @@ -847,7 +848,7 @@ func (s *dbShard) WriteTagged( unit xtime.Unit, annotation []byte, wOpts series.WriteOptions, -) (ts.Series, bool, error) { +) (SeriesWrite, error) { return s.writeAndIndex(ctx, id, tags, timestamp, value, unit, annotation, wOpts, true) } @@ -860,7 +861,7 @@ func (s *dbShard) Write( unit xtime.Unit, annotation []byte, wOpts series.WriteOptions, -) (ts.Series, bool, error) { +) (SeriesWrite, error) { return s.writeAndIndex(ctx, id, ident.EmptyTagIterator, timestamp, value, unit, annotation, wOpts, false) } @@ -875,11 +876,11 @@ func (s *dbShard) writeAndIndex( annotation []byte, wOpts series.WriteOptions, shouldReverseIndex bool, -) (ts.Series, bool, error) { +) (SeriesWrite, error) { // Prepare write entry, opts, err := s.tryRetrieveWritableSeries(id) if err != nil { - return ts.Series{}, false, err + return SeriesWrite{}, err } writable := entry != nil @@ -895,7 +896,7 @@ func (s *dbShard) writeAndIndex( }, }) if err != nil { - return ts.Series{}, false, err + return SeriesWrite{}, err } // Wait for the insert to be batched together and inserted @@ -904,7 +905,7 @@ 
func (s *dbShard) writeAndIndex( // Retrieve the inserted entry entry, err = s.writableSeries(id, tags) if err != nil { - return ts.Series{}, false, err + return SeriesWrite{}, err } writable = true @@ -915,6 +916,8 @@ func (s *dbShard) writeAndIndex( var ( commitLogSeriesID ident.ID commitLogSeriesUniqueIndex uint64 + needsIndex bool + pendingIndexInsert writes.PendingIndexInsert // Err on the side of caution and always write to the commitlog if writing // async, since there is no information about whether the write succeeded // or not. @@ -935,14 +938,17 @@ func (s *dbShard) writeAndIndex( commitLogSeriesUniqueIndex = entry.Index if err == nil && shouldReverseIndex { if entry.NeedsIndexUpdate(s.reverseIndex.BlockStartForWriteTime(timestamp)) { - err = s.insertSeriesForIndexingAsyncBatched(entry, timestamp, - opts.writeNewSeriesAsync) + if !opts.writeNewSeriesAsync { + return SeriesWrite{}, fmt.Errorf("to index async need write new series to be enable") + } + needsIndex = true + pendingIndexInsert = s.pendingIndexInsert(entry, timestamp) } } // release the reference we got on entry from `writableSeries` entry.DecrementReaderWriterCount() if err != nil { - return ts.Series{}, false, err + return SeriesWrite{}, err } } else { // This is an asynchronous insert and write which means we need to clone the annotation @@ -965,15 +971,19 @@ func (s *dbShard) writeAndIndex( annotation: annotationClone, opts: wOpts, }, - hasPendingIndexing: shouldReverseIndex, - pendingIndex: dbShardPendingIndex{ - timestamp: timestamp, - enqueuedAt: s.nowFn(), - }, }) if err != nil { - return ts.Series{}, false, err + return SeriesWrite{}, err } + + if shouldReverseIndex { + if !opts.writeNewSeriesAsync { + return SeriesWrite{}, fmt.Errorf("to index async need write new series to be enable") + } + needsIndex = true + pendingIndexInsert = s.pendingIndexInsert(entry, timestamp) + } + // NB(r): Make sure to use the copied ID which will eventually // be set to the newly series inserted ID. // The `id` var here is volatile after the context is closed @@ -983,15 +993,18 @@ func (s *dbShard) writeAndIndex( commitLogSeriesUniqueIndex = result.entry.Index } - // Write commit log - series := ts.Series{ - UniqueIndex: commitLogSeriesUniqueIndex, - Namespace: s.namespace.ID(), - ID: commitLogSeriesID, - Shard: s.shard, - } - - return series, wasWritten, nil + // Return metadata useful for writing to commit log and indexing. + return SeriesWrite{ + Series: ts.Series{ + UniqueIndex: commitLogSeriesUniqueIndex, + Namespace: s.namespace.ID(), + ID: commitLogSeriesID, + Shard: s.shard, + }, + WasWritten: wasWritten, + NeedsIndex: needsIndex, + PendingIndexInsert: pendingIndexInsert, + }, nil } func (s *dbShard) SeriesReadWriteRef( @@ -1210,6 +1223,22 @@ type insertAsyncResult struct { entry *lookup.Entry } +func (s *dbShard) pendingIndexInsert( + entry *lookup.Entry, + timestamp time.Time, +) writes.PendingIndexInsert { + // inc a ref on the entry to ensure it's valid until the queue acts upon it. 
+ entry.OnIndexPrepare() + return writes.PendingIndexInsert{ + Entry: index.WriteBatchEntry{ + Timestamp: timestamp, + OnIndexSeries: entry, + EnqueuedAt: s.nowFn(), + }, + Document: entry.Series.Metadata(), + } +} + func (s *dbShard) insertSeriesForIndexingAsyncBatched( entry *lookup.Entry, timestamp time.Time, diff --git a/src/dbnode/storage/storage_mock.go b/src/dbnode/storage/storage_mock.go index b2bbfe04c5..e40aea187e 100644 --- a/src/dbnode/storage/storage_mock.go +++ b/src/dbnode/storage/storage_mock.go @@ -5,6 +5,10 @@ package storage import ( + reflect "reflect" + sync0 "sync" + time0 "time" + gomock "github.com/golang/mock/gomock" clock "github.com/m3db/m3/src/dbnode/clock" encoding "github.com/m3db/m3/src/dbnode/encoding" @@ -21,6 +25,7 @@ import ( repair "github.com/m3db/m3/src/dbnode/storage/repair" series "github.com/m3db/m3/src/dbnode/storage/series" ts "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/ts/writes" xio "github.com/m3db/m3/src/dbnode/x/xio" xpool "github.com/m3db/m3/src/dbnode/x/xpool" context "github.com/m3db/m3/src/x/context" @@ -30,9 +35,6 @@ import ( pool "github.com/m3db/m3/src/x/pool" sync "github.com/m3db/m3/src/x/sync" time "github.com/m3db/m3/src/x/time" - reflect "reflect" - sync0 "sync" - time0 "time" ) // MockIndexedErrorHandler is a mock of IndexedErrorHandler interface @@ -233,10 +235,10 @@ func (mr *MockDatabaseMockRecorder) WriteTagged(ctx, namespace, id, tags, timest } // BatchWriter mocks base method -func (m *MockDatabase) BatchWriter(namespace ident.ID, batchSize int) (ts.BatchWriter, error) { +func (m *MockDatabase) BatchWriter(namespace ident.ID, batchSize int) (writes.BatchWriter, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BatchWriter", namespace, batchSize) - ret0, _ := ret[0].(ts.BatchWriter) + ret0, _ := ret[0].(writes.BatchWriter) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -248,7 +250,7 @@ func (mr *MockDatabaseMockRecorder) BatchWriter(namespace, batchSize interface{} } // WriteBatch mocks base method -func (m *MockDatabase) WriteBatch(ctx context.Context, namespace ident.ID, writes ts.BatchWriter, errHandler IndexedErrorHandler) error { +func (m *MockDatabase) WriteBatch(ctx context.Context, namespace ident.ID, writes writes.BatchWriter, errHandler IndexedErrorHandler) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteBatch", ctx, namespace, writes, errHandler) ret0, _ := ret[0].(error) @@ -262,7 +264,7 @@ func (mr *MockDatabaseMockRecorder) WriteBatch(ctx, namespace, writes, errHandle } // WriteTaggedBatch mocks base method -func (m *MockDatabase) WriteTaggedBatch(ctx context.Context, namespace ident.ID, writes ts.BatchWriter, errHandler IndexedErrorHandler) error { +func (m *MockDatabase) WriteTaggedBatch(ctx context.Context, namespace ident.ID, writes writes.BatchWriter, errHandler IndexedErrorHandler) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteTaggedBatch", ctx, namespace, writes, errHandler) ret0, _ := ret[0].(error) @@ -628,10 +630,10 @@ func (mr *MockdatabaseMockRecorder) WriteTagged(ctx, namespace, id, tags, timest } // BatchWriter mocks base method -func (m *Mockdatabase) BatchWriter(namespace ident.ID, batchSize int) (ts.BatchWriter, error) { +func (m *Mockdatabase) BatchWriter(namespace ident.ID, batchSize int) (writes.BatchWriter, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BatchWriter", namespace, batchSize) - ret0, _ := ret[0].(ts.BatchWriter) + ret0, _ := ret[0].(writes.BatchWriter) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -643,7 +645,7 @@ func (mr 
*MockdatabaseMockRecorder) BatchWriter(namespace, batchSize interface{} } // WriteBatch mocks base method -func (m *Mockdatabase) WriteBatch(ctx context.Context, namespace ident.ID, writes ts.BatchWriter, errHandler IndexedErrorHandler) error { +func (m *Mockdatabase) WriteBatch(ctx context.Context, namespace ident.ID, writes writes.BatchWriter, errHandler IndexedErrorHandler) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteBatch", ctx, namespace, writes, errHandler) ret0, _ := ret[0].(error) @@ -657,7 +659,7 @@ func (mr *MockdatabaseMockRecorder) WriteBatch(ctx, namespace, writes, errHandle } // WriteTaggedBatch mocks base method -func (m *Mockdatabase) WriteTaggedBatch(ctx context.Context, namespace ident.ID, writes ts.BatchWriter, errHandler IndexedErrorHandler) error { +func (m *Mockdatabase) WriteTaggedBatch(ctx context.Context, namespace ident.ID, writes writes.BatchWriter, errHandler IndexedErrorHandler) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteTaggedBatch", ctx, namespace, writes, errHandler) ret0, _ := ret[0].(error) @@ -3925,7 +3927,7 @@ func (mr *MockOptionsMockRecorder) QueryIDsWorkerPool() *gomock.Call { } // SetWriteBatchPool mocks base method -func (m *MockOptions) SetWriteBatchPool(value *ts.WriteBatchPool) Options { +func (m *MockOptions) SetWriteBatchPool(value *writes.WriteBatchPool) Options { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetWriteBatchPool", value) ret0, _ := ret[0].(Options) @@ -3939,10 +3941,10 @@ func (mr *MockOptionsMockRecorder) SetWriteBatchPool(value interface{}) *gomock. } // WriteBatchPool mocks base method -func (m *MockOptions) WriteBatchPool() *ts.WriteBatchPool { +func (m *MockOptions) WriteBatchPool() *writes.WriteBatchPool { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteBatchPool") - ret0, _ := ret[0].(*ts.WriteBatchPool) + ret0, _ := ret[0].(*writes.WriteBatchPool) return ret0 } diff --git a/src/dbnode/storage/types.go b/src/dbnode/storage/types.go index f73b1d60ba..9cd3ba2277 100644 --- a/src/dbnode/storage/types.go +++ b/src/dbnode/storage/types.go @@ -41,6 +41,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/series" "github.com/m3db/m3/src/dbnode/storage/series/lookup" "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/ts/writes" "github.com/m3db/m3/src/dbnode/x/xio" "github.com/m3db/m3/src/dbnode/x/xpool" "github.com/m3db/m3/src/x/context" @@ -122,17 +123,17 @@ type Database interface { // Note that when using the BatchWriter the caller owns the lifecycle of the series // IDs if they're being pooled its the callers responsibility to return them to the // appropriate pool, but the encoded tags and annotations are owned by the - // ts.WriteBatch itself and will be finalized when the entire ts.WriteBatch is finalized + // writes.WriteBatch itself and will be finalized when the entire writes.WriteBatch is finalized // due to their lifecycle being more complicated. // Callers can still control the pooling of the encoded tags and annotations by using // the SetFinalizeEncodedTagsFn and SetFinalizeAnnotationFn on the WriteBatch itself. - BatchWriter(namespace ident.ID, batchSize int) (ts.BatchWriter, error) + BatchWriter(namespace ident.ID, batchSize int) (writes.BatchWriter, error) // WriteBatch is the same as Write, but in batch. 
WriteBatch( ctx context.Context, namespace ident.ID, - writes ts.BatchWriter, + writes writes.BatchWriter, errHandler IndexedErrorHandler, ) error @@ -140,7 +141,7 @@ type Database interface { WriteTaggedBatch( ctx context.Context, namespace ident.ID, - writes ts.BatchWriter, + writes writes.BatchWriter, errHandler IndexedErrorHandler, ) error @@ -266,6 +267,14 @@ func (n NamespacesByID) Less(i, j int) bool { return bytes.Compare(n[i].ID().Bytes(), n[j].ID().Bytes()) < 0 } +// SeriesWrite is a result of a series write. +type SeriesWrite struct { + Series ts.Series + WasWritten bool + NeedsIndex bool + PendingIndexInsert writes.PendingIndexInsert +} + type databaseNamespace interface { Namespace @@ -289,7 +298,7 @@ type databaseNamespace interface { value float64, unit xtime.Unit, annotation []byte, - ) (ts.Series, bool, error) + ) (SeriesWrite, error) // WriteTagged values to the namespace for an ID. WriteTagged( @@ -300,7 +309,7 @@ type databaseNamespace interface { value float64, unit xtime.Unit, annotation []byte, - ) (ts.Series, bool, error) + ) (SeriesWrite, error) // QueryIDs resolves the given query into known IDs. QueryIDs( @@ -392,6 +401,9 @@ type databaseNamespace interface { id ident.ID, tags ident.TagIterator, ) (result SeriesReadWriteRef, owned bool, err error) + + // WritePendingIndexInserts will write any pending index inserts. + WritePendingIndexInserts(pending []writes.PendingIndexInsert) error } // SeriesReadWriteRef is a read/write reference for a series, @@ -447,7 +459,7 @@ type databaseShard interface { unit xtime.Unit, annotation []byte, wOpts series.WriteOptions, - ) (ts.Series, bool, error) + ) (SeriesWrite, error) // WriteTagged writes a value to the shard for an ID with tags. WriteTagged( @@ -459,7 +471,7 @@ type databaseShard interface { unit xtime.Unit, annotation []byte, wOpts series.WriteOptions, - ) (ts.Series, bool, error) + ) (SeriesWrite, error) ReadEncoded( ctx context.Context, @@ -600,6 +612,11 @@ type NamespaceIndex interface { batch *index.WriteBatch, ) error + // WritePending indexes the provided pending entries. + WritePending( + pending []writes.PendingIndexInsert, + ) error + // Query resolves the given query into known IDs. Query( ctx context.Context, @@ -683,7 +700,13 @@ type namespaceIndexInsertQueue interface { // inserts to the index asynchronously. It executes the provided callbacks // based on the result of the execution. The returned wait group can be used // if the insert is required to be synchronous. - InsertBatch(batch *index.WriteBatch) (*sync.WaitGroup, error) + InsertBatch( + batch *index.WriteBatch, + ) (*sync.WaitGroup, error) + + InsertPending( + pending []writes.PendingIndexInsert, + ) (*sync.WaitGroup, error) } // databaseBootstrapManager manages the bootstrap process. @@ -1050,10 +1073,10 @@ type Options interface { QueryIDsWorkerPool() xsync.WorkerPool // SetWriteBatchPool sets the WriteBatch pool. - SetWriteBatchPool(value *ts.WriteBatchPool) Options + SetWriteBatchPool(value *writes.WriteBatchPool) Options // WriteBatchPool returns the WriteBatch pool. - WriteBatchPool() *ts.WriteBatchPool + WriteBatchPool() *writes.WriteBatchPool // SetBufferBucketPool sets the BufferBucket pool. 
SetBufferBucketPool(value *series.BufferBucketPool) Options diff --git a/src/dbnode/ts/types.go b/src/dbnode/ts/types.go index a1eff122c0..0460d9a25e 100644 --- a/src/dbnode/ts/types.go +++ b/src/dbnode/ts/types.go @@ -27,48 +27,6 @@ import ( xtime "github.com/m3db/m3/src/x/time" ) -// FinalizeEncodedTagsFn is a function that will be called for each encoded tags once -// the WriteBatch itself is finalized. -type FinalizeEncodedTagsFn func(b []byte) - -// FinalizeAnnotationFn is a function that will be called for each annotation once -// the WriteBatch itself is finalized. -type FinalizeAnnotationFn func(b []byte) - -// Write is a write for the commitlog. -type Write struct { - Series Series - Datapoint Datapoint - Unit xtime.Unit - Annotation Annotation -} - -// BatchWrite represents a write that was added to the -// BatchWriter. -type BatchWrite struct { - // Used by the commitlog. If this is false, the commitlog should not write - // the series at this index. - SkipWrite bool - // Used by the commitlog (series needed to be updated by the shard - // object first, cannot use the Series provided by the caller as it - // is missing important fields like Tags.) - Write Write - // Not used by the commitlog, provided by the caller (since the request - // is usually coming from over the wire) and is superseded by the Tags - // in Write.Series which will get set by the Shard object. - TagIter ident.TagIterator - // EncodedTags is used by the commit log, but also held onto as a reference - // here so that it can be returned to the pool after the write to commit log - // completes (since the Write.Series gets overwritten in SetOutcome so can't - // use the reference there for returning to the pool). - EncodedTags EncodedTags - // Used to help the caller tie errors back to an index in their - // own collection. - OriginalIndex int - // Used by the commitlog. - Err error -} - // Series describes a series. type Series struct { // UniqueIndex is the unique index assigned to this series (only valid @@ -106,48 +64,3 @@ type EncodedTags []byte // Annotation represents information used to annotate datapoints. type Annotation []byte - -// WriteBatch is the interface that supports adding writes to the batch, -// as well as iterating through the batched writes and resetting the -// struct (for pooling). -type WriteBatch interface { - BatchWriter - // Can't use a real iterator pattern here as it slows things down. - Iter() []BatchWrite - SetOutcome(idx int, series Series, err error) - SetSkipWrite(idx int) - Reset(batchSize int, ns ident.ID) - Finalize() - - // Returns the WriteBatch's internal capacity. Used by the pool to throw - // away batches that have grown too large. - cap() int -} - -// BatchWriter is the interface that is used for preparing a batch of -// writes. -type BatchWriter interface { - Add( - originalIndex int, - id ident.ID, - timestamp time.Time, - value float64, - unit xtime.Unit, - annotation []byte, - ) error - - AddTagged( - originalIndex int, - id ident.ID, - tags ident.TagIterator, - encodedTags EncodedTags, - timestamp time.Time, - value float64, - unit xtime.Unit, - annotation []byte, - ) error - - SetFinalizeEncodedTagsFn(f FinalizeEncodedTagsFn) - - SetFinalizeAnnotationFn(f FinalizeAnnotationFn) -} diff --git a/src/dbnode/ts/write_batch_mock.go b/src/dbnode/ts/write_batch_mock.go deleted file mode 100644 index f8a5ff682f..0000000000 --- a/src/dbnode/ts/write_batch_mock.go +++ /dev/null @@ -1,261 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. 
-// Source: github.com/m3db/m3/src/dbnode/ts/types.go - -// Copyright (c) 2019 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package ts is a generated GoMock package. -package ts - -import ( - "reflect" - "time" - - "github.com/m3db/m3/src/x/ident" - time0 "github.com/m3db/m3/src/x/time" - - "github.com/golang/mock/gomock" -) - -// MockWriteBatch is a mock of WriteBatch interface -type MockWriteBatch struct { - ctrl *gomock.Controller - recorder *MockWriteBatchMockRecorder -} - -// MockWriteBatchMockRecorder is the mock recorder for MockWriteBatch -type MockWriteBatchMockRecorder struct { - mock *MockWriteBatch -} - -// NewMockWriteBatch creates a new mock instance -func NewMockWriteBatch(ctrl *gomock.Controller) *MockWriteBatch { - mock := &MockWriteBatch{ctrl: ctrl} - mock.recorder = &MockWriteBatchMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockWriteBatch) EXPECT() *MockWriteBatchMockRecorder { - return m.recorder -} - -// Add mocks base method -func (m *MockWriteBatch) Add(originalIndex int, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Add", originalIndex, id, timestamp, value, unit, annotation) - ret0, _ := ret[0].(error) - return ret0 -} - -// Add indicates an expected call of Add -func (mr *MockWriteBatchMockRecorder) Add(originalIndex, id, timestamp, value, unit, annotation interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockWriteBatch)(nil).Add), originalIndex, id, timestamp, value, unit, annotation) -} - -// AddTagged mocks base method -func (m *MockWriteBatch) AddTagged(originalIndex int, id ident.ID, tags ident.TagIterator, encodedTags EncodedTags, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddTagged", originalIndex, id, tags, encodedTags, timestamp, value, unit, annotation) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddTagged indicates an expected call of AddTagged -func (mr *MockWriteBatchMockRecorder) AddTagged(originalIndex, id, tags, encodedTags, timestamp, value, unit, annotation interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTagged", reflect.TypeOf((*MockWriteBatch)(nil).AddTagged), originalIndex, id, 
tags, encodedTags, timestamp, value, unit, annotation) -} - -// SetFinalizeEncodedTagsFn mocks base method -func (m *MockWriteBatch) SetFinalizeEncodedTagsFn(f FinalizeEncodedTagsFn) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetFinalizeEncodedTagsFn", f) -} - -// SetFinalizeEncodedTagsFn indicates an expected call of SetFinalizeEncodedTagsFn -func (mr *MockWriteBatchMockRecorder) SetFinalizeEncodedTagsFn(f interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFinalizeEncodedTagsFn", reflect.TypeOf((*MockWriteBatch)(nil).SetFinalizeEncodedTagsFn), f) -} - -// SetFinalizeAnnotationFn mocks base method -func (m *MockWriteBatch) SetFinalizeAnnotationFn(f FinalizeAnnotationFn) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetFinalizeAnnotationFn", f) -} - -// SetFinalizeAnnotationFn indicates an expected call of SetFinalizeAnnotationFn -func (mr *MockWriteBatchMockRecorder) SetFinalizeAnnotationFn(f interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFinalizeAnnotationFn", reflect.TypeOf((*MockWriteBatch)(nil).SetFinalizeAnnotationFn), f) -} - -// Iter mocks base method -func (m *MockWriteBatch) Iter() []BatchWrite { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Iter") - ret0, _ := ret[0].([]BatchWrite) - return ret0 -} - -// Iter indicates an expected call of Iter -func (mr *MockWriteBatchMockRecorder) Iter() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iter", reflect.TypeOf((*MockWriteBatch)(nil).Iter)) -} - -// SetOutcome mocks base method -func (m *MockWriteBatch) SetOutcome(idx int, series Series, err error) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetOutcome", idx, series, err) -} - -// SetOutcome indicates an expected call of SetOutcome -func (mr *MockWriteBatchMockRecorder) SetOutcome(idx, series, err interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetOutcome", reflect.TypeOf((*MockWriteBatch)(nil).SetOutcome), idx, series, err) -} - -// SetSkipWrite mocks base method -func (m *MockWriteBatch) SetSkipWrite(idx int) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSkipWrite", idx) -} - -// SetSkipWrite indicates an expected call of SetSkipWrite -func (mr *MockWriteBatchMockRecorder) SetSkipWrite(idx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSkipWrite", reflect.TypeOf((*MockWriteBatch)(nil).SetSkipWrite), idx) -} - -// Reset mocks base method -func (m *MockWriteBatch) Reset(batchSize int, ns ident.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Reset", batchSize, ns) -} - -// Reset indicates an expected call of Reset -func (mr *MockWriteBatchMockRecorder) Reset(batchSize, ns interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockWriteBatch)(nil).Reset), batchSize, ns) -} - -// Finalize mocks base method -func (m *MockWriteBatch) Finalize() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Finalize") -} - -// Finalize indicates an expected call of Finalize -func (mr *MockWriteBatchMockRecorder) Finalize() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Finalize", reflect.TypeOf((*MockWriteBatch)(nil).Finalize)) -} - -// cap mocks base method -func (m *MockWriteBatch) cap() int { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "cap") - ret0, _ := ret[0].(int) - return ret0 -} - -// cap 
indicates an expected call of cap -func (mr *MockWriteBatchMockRecorder) cap() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "cap", reflect.TypeOf((*MockWriteBatch)(nil).cap)) -} - -// MockBatchWriter is a mock of BatchWriter interface -type MockBatchWriter struct { - ctrl *gomock.Controller - recorder *MockBatchWriterMockRecorder -} - -// MockBatchWriterMockRecorder is the mock recorder for MockBatchWriter -type MockBatchWriterMockRecorder struct { - mock *MockBatchWriter -} - -// NewMockBatchWriter creates a new mock instance -func NewMockBatchWriter(ctrl *gomock.Controller) *MockBatchWriter { - mock := &MockBatchWriter{ctrl: ctrl} - mock.recorder = &MockBatchWriterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockBatchWriter) EXPECT() *MockBatchWriterMockRecorder { - return m.recorder -} - -// Add mocks base method -func (m *MockBatchWriter) Add(originalIndex int, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Add", originalIndex, id, timestamp, value, unit, annotation) - ret0, _ := ret[0].(error) - return ret0 -} - -// Add indicates an expected call of Add -func (mr *MockBatchWriterMockRecorder) Add(originalIndex, id, timestamp, value, unit, annotation interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockBatchWriter)(nil).Add), originalIndex, id, timestamp, value, unit, annotation) -} - -// AddTagged mocks base method -func (m *MockBatchWriter) AddTagged(originalIndex int, id ident.ID, tags ident.TagIterator, encodedTags EncodedTags, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddTagged", originalIndex, id, tags, encodedTags, timestamp, value, unit, annotation) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddTagged indicates an expected call of AddTagged -func (mr *MockBatchWriterMockRecorder) AddTagged(originalIndex, id, tags, encodedTags, timestamp, value, unit, annotation interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTagged", reflect.TypeOf((*MockBatchWriter)(nil).AddTagged), originalIndex, id, tags, encodedTags, timestamp, value, unit, annotation) -} - -// SetFinalizeEncodedTagsFn mocks base method -func (m *MockBatchWriter) SetFinalizeEncodedTagsFn(f FinalizeEncodedTagsFn) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetFinalizeEncodedTagsFn", f) -} - -// SetFinalizeEncodedTagsFn indicates an expected call of SetFinalizeEncodedTagsFn -func (mr *MockBatchWriterMockRecorder) SetFinalizeEncodedTagsFn(f interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFinalizeEncodedTagsFn", reflect.TypeOf((*MockBatchWriter)(nil).SetFinalizeEncodedTagsFn), f) -} - -// SetFinalizeAnnotationFn mocks base method -func (m *MockBatchWriter) SetFinalizeAnnotationFn(f FinalizeAnnotationFn) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetFinalizeAnnotationFn", f) -} - -// SetFinalizeAnnotationFn indicates an expected call of SetFinalizeAnnotationFn -func (mr *MockBatchWriterMockRecorder) SetFinalizeAnnotationFn(f interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFinalizeAnnotationFn", 
reflect.TypeOf((*MockBatchWriter)(nil).SetFinalizeAnnotationFn), f) -} diff --git a/src/dbnode/ts/writes/types.go b/src/dbnode/ts/writes/types.go new file mode 100644 index 0000000000..bb8ac839f5 --- /dev/null +++ b/src/dbnode/ts/writes/types.go @@ -0,0 +1,129 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package writes + +import ( + "time" + + "github.com/m3db/m3/src/dbnode/storage/index" + "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/m3ninx/doc" + "github.com/m3db/m3/src/x/ident" + xtime "github.com/m3db/m3/src/x/time" +) + +// FinalizeEncodedTagsFn is a function that will be called for each encoded tags once +// the WriteBatch itself is finalized. +type FinalizeEncodedTagsFn func(b []byte) + +// FinalizeAnnotationFn is a function that will be called for each annotation once +// the WriteBatch itself is finalized. +type FinalizeAnnotationFn func(b []byte) + +// Write is a write for the commitlog. +type Write struct { + Series ts.Series + Datapoint ts.Datapoint + Unit xtime.Unit + Annotation ts.Annotation +} + +// PendingIndexInsert is a pending index insert. +type PendingIndexInsert struct { + Entry index.WriteBatchEntry + Document doc.Document +} + +// BatchWrite represents a write that was added to the +// BatchWriter. +type BatchWrite struct { + // Used by the commitlog. If this is false, the commitlog should not write + // the series at this index. + SkipWrite bool + // PendingIndex returns whether a write has a pending index. + PendingIndex bool + // Used by the commitlog (series needed to be updated by the shard + // object first, cannot use the Series provided by the caller as it + // is missing important fields like Tags.) + Write Write + // Not used by the commitlog, provided by the caller (since the request + // is usually coming from over the wire) and is superseded by the Tags + // in Write.Series which will get set by the Shard object. + TagIter ident.TagIterator + // EncodedTags is used by the commit log, but also held onto as a reference + // here so that it can be returned to the pool after the write to commit log + // completes (since the Write.Series gets overwritten in SetOutcome so can't + // use the reference there for returning to the pool). + EncodedTags ts.EncodedTags + // Used to help the caller tie errors back to an index in their + // own collection. + OriginalIndex int + // Used by the commitlog. 
+ Err error +} + +// WriteBatch is the interface that supports adding writes to the batch, +// as well as iterating through the batched writes and resetting the +// struct (for pooling). +type WriteBatch interface { + BatchWriter + // Can't use a real iterator pattern here as it slows things down. + Iter() []BatchWrite + SetPendingIndex(idx int, pending PendingIndexInsert) + PendingIndex() []PendingIndexInsert + SetError(idx int, err error) + SetSeries(idx int, series ts.Series) + SetSkipWrite(idx int) + Reset(batchSize int, ns ident.ID) + Finalize() + + // Returns the WriteBatch's internal capacity. Used by the pool to throw + // away batches that have grown too large. + cap() int +} + +// BatchWriter is the interface that is used for preparing a batch of +// writes. +type BatchWriter interface { + Add( + originalIndex int, + id ident.ID, + timestamp time.Time, + value float64, + unit xtime.Unit, + annotation []byte, + ) error + + AddTagged( + originalIndex int, + id ident.ID, + tags ident.TagIterator, + encodedTags ts.EncodedTags, + timestamp time.Time, + value float64, + unit xtime.Unit, + annotation []byte, + ) error + + SetFinalizeEncodedTagsFn(f FinalizeEncodedTagsFn) + + SetFinalizeAnnotationFn(f FinalizeAnnotationFn) +} diff --git a/src/dbnode/ts/write_batch.go b/src/dbnode/ts/writes/write_batch.go similarity index 84% rename from src/dbnode/ts/write_batch.go rename to src/dbnode/ts/writes/write_batch.go index fb3a8231e0..455e00896a 100644 --- a/src/dbnode/ts/write_batch.go +++ b/src/dbnode/ts/writes/write_batch.go @@ -18,12 +18,13 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package ts +package writes import ( "errors" "time" + "github.com/m3db/m3/src/dbnode/ts" "github.com/m3db/m3/src/x/ident" xtime "github.com/m3db/m3/src/x/time" ) @@ -33,8 +34,9 @@ var ( ) type writeBatch struct { - writes []BatchWrite - ns ident.ID + writes []BatchWrite + pendingIndex []PendingIndexInsert + ns ident.ID // Enables callers to pool encoded tags by allowing them to // provide a function to finalize all encoded tags once the // writeBatch itself gets finalized. 
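A rough, self-contained sketch of how a caller is expected to drive the reworked outcome API (SetError/SetPendingIndex/PendingIndex), mirroring the writeBatch flow in database.go earlier in this patch; the types below are toy stand-ins, not the real writes package:

package main

import (
    "errors"
    "fmt"
)

// pendingIndexInsert and batchWrite are toy stand-ins for
// writes.PendingIndexInsert and writes.BatchWrite.
type pendingIndexInsert struct{ id string }

type batchWrite struct {
    id           string
    skipWrite    bool
    pendingIndex bool
    err          error
}

// writeBatch records per-entry outcomes and accumulates pending index
// inserts into one slice so they can be flushed together, limiting lock
// contention on the index insert queue.
type writeBatch struct {
    writes  []batchWrite
    pending []pendingIndexInsert
}

func (b *writeBatch) SetError(i int, err error) {
    b.writes[i].skipWrite = true
    b.writes[i].err = err
}

func (b *writeBatch) SetPendingIndex(i int, p pendingIndexInsert) {
    b.writes[i].pendingIndex = true
    b.pending = append(b.pending, p)
}

func (b *writeBatch) PendingIndex() []pendingIndexInsert { return b.pending }

func main() {
    batch := &writeBatch{writes: []batchWrite{{id: "a"}, {id: "b"}}}

    // Record per-write outcomes first...
    batch.SetPendingIndex(0, pendingIndexInsert{id: "a"})
    batch.SetError(1, errors.New("rejected"))

    // ...then write all pending index inserts together in one go.
    if pending := batch.PendingIndex(); len(pending) > 0 {
        fmt.Println("flushing", len(pending), "pending index inserts")
    }
}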
@@ -53,9 +55,10 @@ func NewWriteBatch( finalizeFn func(WriteBatch), ) WriteBatch { return &writeBatch{ - writes: make([]BatchWrite, 0, batchSize), - ns: ns, - finalizeFn: finalizeFn, + writes: make([]BatchWrite, 0, batchSize), + pendingIndex: make([]PendingIndexInsert, 0, batchSize), + ns: ns, + finalizeFn: finalizeFn, } } @@ -80,7 +83,7 @@ func (b *writeBatch) AddTagged( originalIndex int, id ident.ID, tagIter ident.TagIterator, - encodedTags EncodedTags, + encodedTags ts.EncodedTags, timestamp time.Time, value float64, unit xtime.Unit, @@ -116,11 +119,15 @@ func (b *writeBatch) Iter() []BatchWrite { return b.writes } -func (b *writeBatch) SetOutcome(idx int, series Series, err error) { +func (b *writeBatch) SetSeries(idx int, series ts.Series) { b.writes[idx].SkipWrite = false b.writes[idx].Write.Series = series // Make sure that the EncodedTags does not get clobbered b.writes[idx].Write.Series.EncodedTags = b.writes[idx].EncodedTags +} + +func (b *writeBatch) SetError(idx int, err error) { + b.writes[idx].SkipWrite = true b.writes[idx].Err = err } @@ -128,6 +135,15 @@ func (b *writeBatch) SetSkipWrite(idx int) { b.writes[idx].SkipWrite = true } +func (b *writeBatch) SetPendingIndex(idx int, pending PendingIndexInsert) { + b.writes[idx].PendingIndex = true + b.pendingIndex = append(b.pendingIndex, pending) +} + +func (b *writeBatch) PendingIndex() []PendingIndexInsert { + return b.pendingIndex +} + // Set the function that will be called to finalize annotations when a WriteBatch // is finalized, allowing the caller to pool them. func (b *writeBatch) SetFinalizeEncodedTagsFn(f FinalizeEncodedTagsFn) { @@ -174,6 +190,13 @@ func (b *writeBatch) Finalize() { } b.writes = b.writes[:0] + var zeroedIndex PendingIndexInsert + for i := range b.pendingIndex { + // Remove any remaining pointers for G.C reasons. + b.pendingIndex[i] = zeroedIndex + } + b.pendingIndex = b.pendingIndex[:0] + b.finalizeFn(b) } @@ -186,7 +209,7 @@ func newBatchWriterWrite( namespace ident.ID, id ident.ID, tagIter ident.TagIterator, - encodedTags EncodedTags, + encodedTags ts.EncodedTags, timestamp time.Time, value float64, unit xtime.Unit, @@ -199,12 +222,12 @@ func newBatchWriterWrite( } return BatchWrite{ Write: Write{ - Series: Series{ + Series: ts.Series{ ID: id, EncodedTags: encodedTags, Namespace: namespace, }, - Datapoint: Datapoint{ + Datapoint: ts.Datapoint{ Timestamp: timestamp, TimestampNanos: xtime.ToUnixNano(timestamp), Value: value, diff --git a/src/dbnode/ts/write_batch_pool.go b/src/dbnode/ts/writes/write_batch_pool.go similarity index 99% rename from src/dbnode/ts/write_batch_pool.go rename to src/dbnode/ts/writes/write_batch_pool.go index a29b9f049d..1aec4272c7 100644 --- a/src/dbnode/ts/write_batch_pool.go +++ b/src/dbnode/ts/writes/write_batch_pool.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package ts +package writes import ( "github.com/m3db/m3/src/x/pool" diff --git a/src/dbnode/ts/write_batch_test.go b/src/dbnode/ts/writes/write_batch_test.go similarity index 99% rename from src/dbnode/ts/write_batch_test.go rename to src/dbnode/ts/writes/write_batch_test.go index 12b76a2c2f..b3ea6d0e90 100644 --- a/src/dbnode/ts/write_batch_test.go +++ b/src/dbnode/ts/writes/write_batch_test.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
-package ts +package writes import ( "bytes" diff --git a/src/x/sync/cpu.go b/src/x/sync/cpu.go new file mode 100644 index 0000000000..c8caaf4717 --- /dev/null +++ b/src/x/sync/cpu.go @@ -0,0 +1,11 @@ +// +build !amd64 !linux + +package sync + +// CPU returns a unique identifier for the core the current goroutine is +// executing on. This function is platform dependent, and is implemented in +// cpu_*.s. +func CPU() uint64 { + // this reverts the behaviour to that of a regular DRWMutex + return 0 +} diff --git a/src/x/sync/cpu_linux_amd64.go b/src/x/sync/cpu_linux_amd64.go new file mode 100644 index 0000000000..8120491b5d --- /dev/null +++ b/src/x/sync/cpu_linux_amd64.go @@ -0,0 +1,4 @@ +package sync + +// CPU is the currently running CPU. +func CPU() uint64 diff --git a/src/x/sync/cpu_linux_amd64.s b/src/x/sync/cpu_linux_amd64.s new file mode 100644 index 0000000000..01146d4ed2 --- /dev/null +++ b/src/x/sync/cpu_linux_amd64.s @@ -0,0 +1,15 @@ +#include "textflag.h" + +// func CPU() uint64 +TEXT CPU(SB),NOSPLIT,$0-8 + MOVL $0x01, AX // version information + MOVL $0x00, BX // any leaf will do + MOVL $0x00, CX // any subleaf will do + + // call CPUID + BYTE $0x0f + BYTE $0xa2 + + SHRQ $24, BX // logical cpu id is put in EBX[31-24] + MOVQ BX, ret+0(FP) + RET diff --git a/src/x/sync/map_cpus.go b/src/x/sync/map_cpus.go new file mode 100644 index 0000000000..c59314cafa --- /dev/null +++ b/src/x/sync/map_cpus.go @@ -0,0 +1,62 @@ +package sync + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +var ( + cpus []int + cpusRead bool +) + +func init() { + cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo") + if err != nil { + return + } + + var pnum int + var apic uint64 + lines := strings.Split(string(cpuinfo), "\n") + for i, line := range lines { + if len(line) == 0 && i != 0 { + if int(apic) >= len(cpus) { + realloc := make([]int, 2*apic) + copy(realloc, cpus) + cpus = realloc + } + cpus[apic] = pnum + pnum = 0 + apic = 0 + continue + } + + fields := strings.Fields(line) + + switch fields[0] { + case "processor": + pnum, err = strconv.Atoi(fields[2]) + case "apicid": + apic, err = strconv.ParseUint(fields[2], 10, 64) + } + + if err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + return + } + } + + cpusRead = true +} + +// IndexCPU returns the current CPU index. +func IndexCPU() int { + if !cpusRead { + return 0 + } + return cpus[CPU()] +} From 660794a511224f6cfbd4bbf22fa49c0842095545 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Sun, 21 Jun 2020 01:57:08 -0400 Subject: [PATCH 03/37] Fix integration tests build --- .../integration/commitlog_bootstrap_helpers.go | 18 +++++++++++++++--- ...ommitlog_bootstrap_index_perf_speed_test.go | 12 +++++++++++- src/dbnode/integration/generate/writer.go | 4 +++- src/dbnode/persist/fs/clone/cloner.go | 4 +++- 4 files changed, 32 insertions(+), 6 deletions(-) diff --git a/src/dbnode/integration/commitlog_bootstrap_helpers.go b/src/dbnode/integration/commitlog_bootstrap_helpers.go index 652a355bcd..0bc98de968 100644 --- a/src/dbnode/integration/commitlog_bootstrap_helpers.go +++ b/src/dbnode/integration/commitlog_bootstrap_helpers.go @@ -151,8 +151,10 @@ func writeCommitLogDataBase( t, defaultIntegrationTestFlushInterval, opts.FlushInterval()) var ( - seriesLookup = newCommitLogSeriesStates(data) - shardSet = s.ShardSet() + seriesLookup = newCommitLogSeriesStates(data) + shardSet = s.ShardSet() + tagEncoderPool = opts.FilesystemOptions().TagEncoderPool() + tagSliceIter = ident.NewTagsIterator(ident.Tags{}) ) // Write out commit log data. 
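The hunks that follow switch the test helpers from attaching structured tags to attaching pre-encoded tag bytes obtained from a pooled encoder. A rough, self-contained sketch of that get/encode/read pattern, with a toy encoder and sync.Pool standing in for the real tag encoder pool:

package main

import (
    "fmt"
    "sync"
)

// tagEncoder is a toy stand-in for a pooled tag encoder: callers Get one,
// Encode a set of tags, read Data, then return it to the pool.
type tagEncoder struct{ buf []byte }

func (e *tagEncoder) Encode(tags []string) error {
    e.buf = e.buf[:0]
    for _, t := range tags {
        e.buf = append(e.buf, t...)
        e.buf = append(e.buf, 0) // toy delimiter
    }
    return nil
}

func (e *tagEncoder) Data() []byte { return e.buf }

func main() {
    pool := sync.Pool{New: func() interface{} { return &tagEncoder{} }}

    tags := []string{"city=nyc", "host=a"}

    enc := pool.Get().(*tagEncoder)
    if err := enc.Encode(tags); err != nil {
        panic(err)
    }
    // Copy the encoded bytes so they outlive the pooled encoder; these travel
    // with the series as EncodedTags instead of the structured tags.
    encodedTags := append([]byte(nil), enc.Data()...)
    pool.Put(enc)

    fmt.Printf("encoded %d tag bytes\n", len(encodedTags))
}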
@@ -182,11 +184,21 @@ func writeCommitLogDataBase( for _, point := range points { series, ok := seriesLookup[point.ID.String()] require.True(t, ok) + + tagSliceIter.Reset(series.tags) + + tagEncoder := tagEncoderPool.Get() + err := tagEncoder.Encode(tagSliceIter) + require.NoError(t, err) + + encodedTagsChecked, ok := tagEncoder.Data() + require.True(t, ok) + cID := ts.Series{ Namespace: namespace.ID(), Shard: shardSet.Lookup(point.ID), ID: point.ID, - Tags: series.tags, + EncodedTags: ts.EncodedTags(encodedTagsChecked.Bytes()), UniqueIndex: series.uniqueIndex, } if pred(point.Value) { diff --git a/src/dbnode/integration/commitlog_bootstrap_index_perf_speed_test.go b/src/dbnode/integration/commitlog_bootstrap_index_perf_speed_test.go index f0ba25f50e..66154a1278 100644 --- a/src/dbnode/integration/commitlog_bootstrap_index_perf_speed_test.go +++ b/src/dbnode/integration/commitlog_bootstrap_index_perf_speed_test.go @@ -156,6 +156,8 @@ func TestCommitLogIndexPerfSpeedBootstrap(t *testing.T) { seriesID := ident.BinaryID(checkedBytes) numBytes := make([]byte, 8) numHexBytes := make([]byte, hex.EncodedLen(len(numBytes))) + tagEncoderPool := commitLogOpts.FilesystemOptions().TagEncoderPool() + tagSliceIter := ident.NewTagsIterator(ident.Tags{}) for i := 0; i < numPoints; i++ { for j := 0; j < numSeries; j++ { // Write the ID prefix @@ -171,11 +173,19 @@ func TestCommitLogIndexPerfSpeedBootstrap(t *testing.T) { // Use the tag sets appropriate for this series number seriesTags := tagSets[j%len(tagSets)] + tagSliceIter.Reset(seriesTags) + tagEncoder := tagEncoderPool.Get() + err := tagEncoder.Encode(tagSliceIter) + require.NoError(t, err) + + encodedTagsChecked, ok := tagEncoder.Data() + require.True(t, ok) + series := ts.Series{ Namespace: ns.ID(), Shard: shardSet.Lookup(seriesID), ID: seriesID, - Tags: seriesTags, + EncodedTags: ts.EncodedTags(encodedTagsChecked.Bytes()), UniqueIndex: uint64(j), } dp := ts.Datapoint{ diff --git a/src/dbnode/integration/generate/writer.go b/src/dbnode/integration/generate/writer.go index 1190dfdc8c..419f99e2f1 100644 --- a/src/dbnode/integration/generate/writer.go +++ b/src/dbnode/integration/generate/writer.go @@ -219,7 +219,9 @@ func writeToDiskWithPredicate( data[0] = segment.Head data[1] = segment.Tail checksum := segment.CalculateChecksum() - err = writer.WriteAll(series.ID, series.Tags, data, checksum) + metadata := persist.NewMetadataFromIDAndTags(series.ID, series.Tags, + persist.MetadataOptions{}) + err = writer.WriteAll(metadata, data, checksum) if err != nil { return err } diff --git a/src/dbnode/persist/fs/clone/cloner.go b/src/dbnode/persist/fs/clone/cloner.go index 74cb7cf4e6..ea9acdae95 100644 --- a/src/dbnode/persist/fs/clone/cloner.go +++ b/src/dbnode/persist/fs/clone/cloner.go @@ -97,7 +97,9 @@ func (c *cloner) Clone(src FileSetID, dest FileSetID, destBlocksize time.Duratio } data.IncRef() - if err := writer.Write(id, tags, data, checksum); err != nil { + metadata := persist.NewMetadataFromIDAndTags(id, tags, + persist.MetadataOptions{}) + if err := writer.Write(metadata, data, checksum); err != nil { return fmt.Errorf("unexpected error while writing data: %v", err) } data.DecRef() From 0df4663bc3645453febc821a33652c33d1d7eb1f Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Sun, 21 Jun 2020 10:59:16 -0400 Subject: [PATCH 04/37] Fix complicated build --- src/x/sync/cpu.go | 11 --- src/x/sync/cpu_amd64.go | 24 ++++++ src/x/sync/{cpu_linux_amd64.s => cpu_amd64.s} | 0 src/x/sync/cpu_index.go | 82 +++++++++++++++++++ src/x/sync/cpu_linux_amd64.go 
| 4 - src/x/sync/cpu_other.go | 31 +++++++ src/x/sync/map_cpus.go | 62 -------------- 7 files changed, 137 insertions(+), 77 deletions(-) delete mode 100644 src/x/sync/cpu.go create mode 100644 src/x/sync/cpu_amd64.go rename src/x/sync/{cpu_linux_amd64.s => cpu_amd64.s} (100%) create mode 100644 src/x/sync/cpu_index.go delete mode 100644 src/x/sync/cpu_linux_amd64.go create mode 100644 src/x/sync/cpu_other.go delete mode 100644 src/x/sync/map_cpus.go diff --git a/src/x/sync/cpu.go b/src/x/sync/cpu.go deleted file mode 100644 index c8caaf4717..0000000000 --- a/src/x/sync/cpu.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !amd64 !linux - -package sync - -// CPU returns a unique identifier for the core the current goroutine is -// executing on. This function is platform dependent, and is implemented in -// cpu_*.s. -func CPU() uint64 { - // this reverts the behaviour to that of a regular DRWMutex - return 0 -} diff --git a/src/x/sync/cpu_amd64.go b/src/x/sync/cpu_amd64.go new file mode 100644 index 0000000000..157be98f29 --- /dev/null +++ b/src/x/sync/cpu_amd64.go @@ -0,0 +1,24 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package sync + +// CPU is the currently running CPU. +func CPU() uint64 diff --git a/src/x/sync/cpu_linux_amd64.s b/src/x/sync/cpu_amd64.s similarity index 100% rename from src/x/sync/cpu_linux_amd64.s rename to src/x/sync/cpu_amd64.s diff --git a/src/x/sync/cpu_index.go b/src/x/sync/cpu_index.go new file mode 100644 index 0000000000..a6406b4a9f --- /dev/null +++ b/src/x/sync/cpu_index.go @@ -0,0 +1,82 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package sync + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +var ( + cpus []int + cpusRead bool +) + +func init() { + cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo") + if err != nil { + return + } + + var pnum int + var apic uint64 + lines := strings.Split(string(cpuinfo), "\n") + for i, line := range lines { + if len(line) == 0 && i != 0 { + if int(apic) >= len(cpus) { + realloc := make([]int, 2*apic) + copy(realloc, cpus) + cpus = realloc + } + cpus[apic] = pnum + pnum = 0 + apic = 0 + continue + } + + fields := strings.Fields(line) + + switch fields[0] { + case "processor": + pnum, err = strconv.Atoi(fields[2]) + case "apicid": + apic, err = strconv.ParseUint(fields[2], 10, 64) + } + + if err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + return + } + } + + cpusRead = true +} + +// IndexCPU returns the current CPU index. +func IndexCPU() int { + if !cpusRead { + return 0 // Likely not linux and nothing available in procinfo. + } + return cpus[CPU()] +} diff --git a/src/x/sync/cpu_linux_amd64.go b/src/x/sync/cpu_linux_amd64.go deleted file mode 100644 index 8120491b5d..0000000000 --- a/src/x/sync/cpu_linux_amd64.go +++ /dev/null @@ -1,4 +0,0 @@ -package sync - -// CPU is the currently running CPU. -func CPU() uint64 diff --git a/src/x/sync/cpu_other.go b/src/x/sync/cpu_other.go new file mode 100644 index 0000000000..d6c062d1d0 --- /dev/null +++ b/src/x/sync/cpu_other.go @@ -0,0 +1,31 @@ +// +build !amd64 +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package sync + +// CPU returns a unique identifier for the core the current goroutine is +// executing on. This function is platform dependent, and is implemented in +// cpu_*.s. 
+func CPU() uint64 { + // this reverts the behaviour to that of a regular DRWMutex + return 0 +} diff --git a/src/x/sync/map_cpus.go b/src/x/sync/map_cpus.go deleted file mode 100644 index c59314cafa..0000000000 --- a/src/x/sync/map_cpus.go +++ /dev/null @@ -1,62 +0,0 @@ -package sync - -import ( - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" -) - -var ( - cpus []int - cpusRead bool -) - -func init() { - cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo") - if err != nil { - return - } - - var pnum int - var apic uint64 - lines := strings.Split(string(cpuinfo), "\n") - for i, line := range lines { - if len(line) == 0 && i != 0 { - if int(apic) >= len(cpus) { - realloc := make([]int, 2*apic) - copy(realloc, cpus) - cpus = realloc - } - cpus[apic] = pnum - pnum = 0 - apic = 0 - continue - } - - fields := strings.Fields(line) - - switch fields[0] { - case "processor": - pnum, err = strconv.Atoi(fields[2]) - case "apicid": - apic, err = strconv.ParseUint(fields[2], 10, 64) - } - - if err != nil { - fmt.Fprintln(os.Stderr, err.Error()) - return - } - } - - cpusRead = true -} - -// IndexCPU returns the current CPU index. -func IndexCPU() int { - if !cpusRead { - return 0 - } - return cpus[CPU()] -} From ee65fb8a3f01acdd027117c142989d177969a5fb Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Sun, 21 Jun 2020 10:59:39 -0400 Subject: [PATCH 05/37] Rename to index_cpu --- src/x/sync/cpu_index.go | 82 ----------------------------------------- 1 file changed, 82 deletions(-) delete mode 100644 src/x/sync/cpu_index.go diff --git a/src/x/sync/cpu_index.go b/src/x/sync/cpu_index.go deleted file mode 100644 index a6406b4a9f..0000000000 --- a/src/x/sync/cpu_index.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package sync - -import ( - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" -) - -var ( - cpus []int - cpusRead bool -) - -func init() { - cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo") - if err != nil { - return - } - - var pnum int - var apic uint64 - lines := strings.Split(string(cpuinfo), "\n") - for i, line := range lines { - if len(line) == 0 && i != 0 { - if int(apic) >= len(cpus) { - realloc := make([]int, 2*apic) - copy(realloc, cpus) - cpus = realloc - } - cpus[apic] = pnum - pnum = 0 - apic = 0 - continue - } - - fields := strings.Fields(line) - - switch fields[0] { - case "processor": - pnum, err = strconv.Atoi(fields[2]) - case "apicid": - apic, err = strconv.ParseUint(fields[2], 10, 64) - } - - if err != nil { - fmt.Fprintln(os.Stderr, err.Error()) - return - } - } - - cpusRead = true -} - -// IndexCPU returns the current CPU index. -func IndexCPU() int { - if !cpusRead { - return 0 // Likely not linux and nothing available in procinfo. - } - return cpus[CPU()] -} From a41d97c6a95503e34bff224aa1153990c3ed9d13 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Sun, 21 Jun 2020 11:15:41 -0400 Subject: [PATCH 06/37] Add metrics for each CPU queue --- src/dbnode/storage/index_insert_queue.go | 74 +++++++++++++++++---- src/x/sync/index_cpu.go | 82 ++++++++++++++++++++++++ 2 files changed, 144 insertions(+), 12 deletions(-) create mode 100644 src/x/sync/index_cpu.go diff --git a/src/dbnode/storage/index_insert_queue.go b/src/dbnode/storage/index_insert_queue.go index 1b2190767c..addfb9541e 100644 --- a/src/dbnode/storage/index_insert_queue.go +++ b/src/dbnode/storage/index_insert_queue.go @@ -23,6 +23,7 @@ package storage import ( "errors" "runtime" + "strconv" "sync" "time" @@ -72,6 +73,8 @@ type nsIndexInsertQueue struct { notifyInsert chan struct{} closeCh chan struct{} + scope tally.Scope + metrics nsIndexInsertQueueMetrics } @@ -102,14 +105,23 @@ func newNamespaceIndexInsertQueue( sleepFn: time.Sleep, notifyInsert: make(chan struct{}, 1), closeCh: make(chan struct{}, 1), + scope: subscope, metrics: newNamespaceIndexInsertQueueMetrics(subscope), } - q.currBatch = q.newBatch() + q.currBatch = q.newBatch(newBatchOptions{instrumented: true}) return q } -func (q *nsIndexInsertQueue) newBatch() *nsIndexInsertBatch { - return newNsIndexInsertBatch(q.namespaceMetadata, q.nowFn) +type newBatchOptions struct { + instrumented bool +} + +func (q *nsIndexInsertQueue) newBatch(opts newBatchOptions) *nsIndexInsertBatch { + scope := tally.NoopScope + if opts.instrumented { + scope = q.scope + } + return newNsIndexInsertBatch(q.namespaceMetadata, q.nowFn, scope) } func (q *nsIndexInsertQueue) insertLoop() { @@ -118,7 +130,7 @@ func (q *nsIndexInsertQueue) insertLoop() { }() var lastInsert time.Time - batch := q.newBatch() + batch := q.newBatch(newBatchOptions{}) for range q.notifyInsert { // Check if inserting too fast elapsedSinceLastInsert := q.nowFn().Sub(lastInsert) @@ -157,10 +169,6 @@ func (q *nsIndexInsertQueue) insertLoop() { } } -func (q *nsIndexInsertQueue) rotate(target *nsIndexInsertBatch) { - -} - func (q *nsIndexInsertQueue) InsertBatch( batch *index.WriteBatch, ) (*sync.WaitGroup, error) { @@ -250,21 +258,47 @@ type nsIndexInsertBatch struct { namespace namespace.Metadata nowFn clock.NowFn wg *sync.WaitGroup - insertsByCPU []*insertsByCPU + insertsByCPU []*nsIndexInsertsByCPU allInserts *index.WriteBatch allInsertsLastReset time.Time } -type insertsByCPU struct { +type nsIndexInsertsByCPU struct { sync.Mutex shardInserts []*index.WriteBatch batchInserts 
[]writes.PendingIndexInsert wg *sync.WaitGroup + metrics nsIndexInsertsByCPUMetrics +} + +type nsIndexInsertsByCPUMetrics struct { + rotateInsertsShard tally.Counter + rotateInsertsPending tally.Counter +} + +func newNamespaceIndexInsertsByCPUMetrics( + cpuIndex int, + scope tally.Scope, +) nsIndexInsertsByCPUMetrics { + scope = scope.Tagged(map[string]string{ + "cpu-index": strconv.Itoa(cpuIndex), + }) + + const rotate = "rotate-inserts" + return nsIndexInsertsByCPUMetrics{ + rotateInsertsShard: scope.Tagged(map[string]string{ + "rotate-type": "shard-insert", + }).Counter(rotate), + rotateInsertsPending: scope.Tagged(map[string]string{ + "rotate-type": "pending-insert", + }).Counter(rotate), + } } func newNsIndexInsertBatch( namespace namespace.Metadata, nowFn clock.NowFn, + scope tally.Scope, ) *nsIndexInsertBatch { b := &nsIndexInsertBatch{ namespace: namespace, @@ -272,7 +306,9 @@ func newNsIndexInsertBatch( } numCPU := runtime.NumCPU() for i := 0; i < numCPU; i++ { - b.insertsByCPU = append(b.insertsByCPU, &insertsByCPU{}) + b.insertsByCPU = append(b.insertsByCPU, &nsIndexInsertsByCPU{ + metrics: newNamespaceIndexInsertsByCPUMetrics(i, scope), + }) } b.allocateAllInserts() b.Rotate(nil) @@ -311,6 +347,7 @@ func (b *nsIndexInsertBatch) Rotate(target *nsIndexInsertBatch) *sync.WaitGroup // Rotate to target if we need to. if target != nil { for idx, inserts := range b.insertsByCPU { + // First prepare the target to take the current batch's inserts. targetInserts := target.insertsByCPU[idx] targetInserts.Lock() @@ -328,9 +365,10 @@ func (b *nsIndexInsertBatch) Rotate(target *nsIndexInsertBatch) *sync.WaitGroup } prevTargetBatchInserts := targetInserts.batchInserts[:0] + // Lock the current batch inserts now ready to rotate to the target. inserts.Lock() - // Copy current slices to target. + // Update current slice refs to take target's inserts. targetInserts.shardInserts = inserts.shardInserts targetInserts.batchInserts = inserts.batchInserts targetInserts.wg = inserts.wg @@ -342,9 +380,21 @@ func (b *nsIndexInsertBatch) Rotate(target *nsIndexInsertBatch) *sync.WaitGroup // Use new wait group. inserts.wg = b.wg + // Unlock as early as possible for writes to keep enqueuing. inserts.Unlock() + numTargetInsertsShard := len(targetInserts.shardInserts) + numTargetInsertsPending := len(targetInserts.batchInserts) + + // Now can unlock target inserts too. targetInserts.Unlock() + + if n := numTargetInsertsShard; n > 0 { + inserts.metrics.rotateInsertsShard.Inc(int64(n)) + } + if n := numTargetInsertsPending; n > 0 { + inserts.metrics.rotateInsertsPending.Inc(int64(n)) + } } } diff --git a/src/x/sync/index_cpu.go b/src/x/sync/index_cpu.go new file mode 100644 index 0000000000..a6406b4a9f --- /dev/null +++ b/src/x/sync/index_cpu.go @@ -0,0 +1,82 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package sync + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +var ( + cpus []int + cpusRead bool +) + +func init() { + cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo") + if err != nil { + return + } + + var pnum int + var apic uint64 + lines := strings.Split(string(cpuinfo), "\n") + for i, line := range lines { + if len(line) == 0 && i != 0 { + if int(apic) >= len(cpus) { + realloc := make([]int, 2*apic) + copy(realloc, cpus) + cpus = realloc + } + cpus[apic] = pnum + pnum = 0 + apic = 0 + continue + } + + fields := strings.Fields(line) + + switch fields[0] { + case "processor": + pnum, err = strconv.Atoi(fields[2]) + case "apicid": + apic, err = strconv.ParseUint(fields[2], 10, 64) + } + + if err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + return + } + } + + cpusRead = true +} + +// IndexCPU returns the current CPU index. +func IndexCPU() int { + if !cpusRead { + return 0 // Likely not linux and nothing available in procinfo. + } + return cpus[CPU()] +} From e6ad37e523a00d33073b09c37a2ce7acca6f2f4a Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Sun, 21 Jun 2020 11:51:57 -0400 Subject: [PATCH 07/37] Use RDTSCP for queue selection --- src/dbnode/storage/index_insert_queue.go | 33 +++++---- src/x/sync/cpu_amd64.s | 15 ----- .../sync/{cpu_amd64.go => cpu_linux_amd64.go} | 4 +- src/x/sync/cpu_linux_amd64.s | 14 ++++ ...o => cpu_supported_arch_unsupported_os.go} | 9 +-- .../sync/cpu_unsupported_arch_supported_os.go | 28 ++++++++ .../cpu_unsupported_arch_unsupported_os.go | 28 ++++++++ src/x/sync/index_cpu.go | 67 ++++++++----------- 8 files changed, 120 insertions(+), 78 deletions(-) delete mode 100644 src/x/sync/cpu_amd64.s rename src/x/sync/{cpu_amd64.go => cpu_linux_amd64.go} (94%) create mode 100644 src/x/sync/cpu_linux_amd64.s rename src/x/sync/{cpu_other.go => cpu_supported_arch_unsupported_os.go} (81%) create mode 100644 src/x/sync/cpu_unsupported_arch_supported_os.go create mode 100644 src/x/sync/cpu_unsupported_arch_unsupported_os.go diff --git a/src/dbnode/storage/index_insert_queue.go b/src/dbnode/storage/index_insert_queue.go index addfb9541e..cd311d7374 100644 --- a/src/dbnode/storage/index_insert_queue.go +++ b/src/dbnode/storage/index_insert_queue.go @@ -22,7 +22,6 @@ package storage import ( "errors" - "runtime" "strconv" "sync" "time" @@ -175,7 +174,7 @@ func (q *nsIndexInsertQueue) InsertBatch( batchLen := batch.Len() // Choose the queue relevant to current CPU index - inserts := q.currBatch.insertsByCPU[xsync.IndexCPU()] + inserts := q.currBatch.insertsByCPUCore[xsync.CPUCore()] inserts.Lock() inserts.shardInserts = append(inserts.shardInserts, batch) wg := inserts.wg @@ -198,7 +197,7 @@ func (q *nsIndexInsertQueue) InsertPending( batchLen := len(pending) // Choose the queue relevant to current CPU index - inserts := q.currBatch.insertsByCPU[xsync.IndexCPU()] + inserts := q.currBatch.insertsByCPUCore[xsync.CPUCore()] inserts.Lock() inserts.batchInserts = append(inserts.batchInserts, pending...) 
wg := inserts.wg @@ -258,34 +257,34 @@ type nsIndexInsertBatch struct { namespace namespace.Metadata nowFn clock.NowFn wg *sync.WaitGroup - insertsByCPU []*nsIndexInsertsByCPU + insertsByCPUCore []*nsIndexInsertsByCPUCore allInserts *index.WriteBatch allInsertsLastReset time.Time } -type nsIndexInsertsByCPU struct { +type nsIndexInsertsByCPUCore struct { sync.Mutex shardInserts []*index.WriteBatch batchInserts []writes.PendingIndexInsert wg *sync.WaitGroup - metrics nsIndexInsertsByCPUMetrics + metrics nsIndexInsertsByCPUCoreMetrics } -type nsIndexInsertsByCPUMetrics struct { +type nsIndexInsertsByCPUCoreMetrics struct { rotateInsertsShard tally.Counter rotateInsertsPending tally.Counter } -func newNamespaceIndexInsertsByCPUMetrics( +func newNamespaceIndexInsertsByCPUCoreMetrics( cpuIndex int, scope tally.Scope, -) nsIndexInsertsByCPUMetrics { +) nsIndexInsertsByCPUCoreMetrics { scope = scope.Tagged(map[string]string{ "cpu-index": strconv.Itoa(cpuIndex), }) const rotate = "rotate-inserts" - return nsIndexInsertsByCPUMetrics{ + return nsIndexInsertsByCPUCoreMetrics{ rotateInsertsShard: scope.Tagged(map[string]string{ "rotate-type": "shard-insert", }).Counter(rotate), @@ -304,10 +303,10 @@ func newNsIndexInsertBatch( namespace: namespace, nowFn: nowFn, } - numCPU := runtime.NumCPU() - for i := 0; i < numCPU; i++ { - b.insertsByCPU = append(b.insertsByCPU, &nsIndexInsertsByCPU{ - metrics: newNamespaceIndexInsertsByCPUMetrics(i, scope), + numCores := xsync.NumCores() + for i := 0; i < numCores; i++ { + b.insertsByCPUCore = append(b.insertsByCPUCore, &nsIndexInsertsByCPUCore{ + metrics: newNamespaceIndexInsertsByCPUCoreMetrics(i, scope), }) } b.allocateAllInserts() @@ -324,7 +323,7 @@ func (b *nsIndexInsertBatch) allocateAllInserts() { func (b *nsIndexInsertBatch) AllInserts() *index.WriteBatch { b.allInserts.Reset() - for _, inserts := range b.insertsByCPU { + for _, inserts := range b.insertsByCPUCore { inserts.Lock() for _, shardInserts := range inserts.shardInserts { b.allInserts.AppendAll(shardInserts) @@ -346,9 +345,9 @@ func (b *nsIndexInsertBatch) Rotate(target *nsIndexInsertBatch) *sync.WaitGroup // Rotate to target if we need to. if target != nil { - for idx, inserts := range b.insertsByCPU { + for idx, inserts := range b.insertsByCPUCore { // First prepare the target to take the current batch's inserts. - targetInserts := target.insertsByCPU[idx] + targetInserts := target.insertsByCPUCore[idx] targetInserts.Lock() // Reset the target inserts since we'll take ref to them in a second. diff --git a/src/x/sync/cpu_amd64.s b/src/x/sync/cpu_amd64.s deleted file mode 100644 index 01146d4ed2..0000000000 --- a/src/x/sync/cpu_amd64.s +++ /dev/null @@ -1,15 +0,0 @@ -#include "textflag.h" - -// func CPU() uint64 -TEXT CPU(SB),NOSPLIT,$0-8 - MOVL $0x01, AX // version information - MOVL $0x00, BX // any leaf will do - MOVL $0x00, CX // any subleaf will do - - // call CPUID - BYTE $0x0f - BYTE $0xa2 - - SHRQ $24, BX // logical cpu id is put in EBX[31-24] - MOVQ BX, ret+0(FP) - RET diff --git a/src/x/sync/cpu_amd64.go b/src/x/sync/cpu_linux_amd64.go similarity index 94% rename from src/x/sync/cpu_amd64.go rename to src/x/sync/cpu_linux_amd64.go index 157be98f29..9d5374fec9 100644 --- a/src/x/sync/cpu_amd64.go +++ b/src/x/sync/cpu_linux_amd64.go @@ -20,5 +20,5 @@ package sync -// CPU is the currently running CPU. -func CPU() uint64 +// getCore is the currently running CPU core. 
+func getCore() int diff --git a/src/x/sync/cpu_linux_amd64.s b/src/x/sync/cpu_linux_amd64.s new file mode 100644 index 0000000000..63d130965b --- /dev/null +++ b/src/x/sync/cpu_linux_amd64.s @@ -0,0 +1,14 @@ +#include "textflag.h" +#include "go_asm.h" + +#define get_tls(r) MOVQ TLS, r + +// func getCore() int +TEXT ·getCore(SB), NOSPLIT, $0 + // RDTSCP + BYTE $0x0f; BYTE $0x01; BYTE $0xf9 + + // Linux puts core ID in the bottom byte. + ANDQ $0xff, CX + MOVQ CX, ret+0(FP) + RET diff --git a/src/x/sync/cpu_other.go b/src/x/sync/cpu_supported_arch_unsupported_os.go similarity index 81% rename from src/x/sync/cpu_other.go rename to src/x/sync/cpu_supported_arch_unsupported_os.go index d6c062d1d0..a0ceef2d28 100644 --- a/src/x/sync/cpu_other.go +++ b/src/x/sync/cpu_supported_arch_unsupported_os.go @@ -1,4 +1,4 @@ -// +build !amd64 +// +build amd64,!linux // // Copyright (c) 2020 Uber Technologies, Inc. // @@ -22,10 +22,7 @@ package sync -// CPU returns a unique identifier for the core the current goroutine is -// executing on. This function is platform dependent, and is implemented in -// cpu_*.s. -func CPU() uint64 { - // this reverts the behaviour to that of a regular DRWMutex +func getCore() int { + // Reverts to just single core. return 0 } diff --git a/src/x/sync/cpu_unsupported_arch_supported_os.go b/src/x/sync/cpu_unsupported_arch_supported_os.go new file mode 100644 index 0000000000..db5e8e9229 --- /dev/null +++ b/src/x/sync/cpu_unsupported_arch_supported_os.go @@ -0,0 +1,28 @@ +// +build !amd64,linux +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package sync + +func getCore() int { + // Reverts to just single core. + return 0 +} diff --git a/src/x/sync/cpu_unsupported_arch_unsupported_os.go b/src/x/sync/cpu_unsupported_arch_unsupported_os.go new file mode 100644 index 0000000000..6675eb37fc --- /dev/null +++ b/src/x/sync/cpu_unsupported_arch_unsupported_os.go @@ -0,0 +1,28 @@ +// +build !amd64,!linux +// +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package sync + +func getCore() int { + // Reverts to just single core. + return 0 +} diff --git a/src/x/sync/index_cpu.go b/src/x/sync/index_cpu.go index a6406b4a9f..fc910fd4e2 100644 --- a/src/x/sync/index_cpu.go +++ b/src/x/sync/index_cpu.go @@ -21,62 +21,53 @@ package sync import ( - "fmt" - "io/ioutil" + "bufio" "os" - "strconv" "strings" ) var ( - cpus []int - cpusRead bool + numCores = 1 ) func init() { - cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo") + f, err := os.Open("/proc/cpuinfo") if err != nil { return } - var pnum int - var apic uint64 - lines := strings.Split(string(cpuinfo), "\n") - for i, line := range lines { - if len(line) == 0 && i != 0 { - if int(apic) >= len(cpus) { - realloc := make([]int, 2*apic) - copy(realloc, cpus) - cpus = realloc - } - cpus[apic] = pnum - pnum = 0 - apic = 0 - continue - } - - fields := strings.Fields(line) + defer f.Close() - switch fields[0] { - case "processor": - pnum, err = strconv.Atoi(fields[2]) - case "apicid": - apic, err = strconv.ParseUint(fields[2], 10, 64) + n := 0 + scanner := bufio.NewScanner(f) + for scanner.Scan() { + if strings.HasPrefix(scanner.Text(), "processor") { + n++ } + } - if err != nil { - fmt.Fprintln(os.Stderr, err.Error()) - return - } + if err := scanner.Err(); err != nil { + return } - cpusRead = true + numCores = n +} + +// NumCores returns the number of cores returned from +// /proc/cpuinfo, if not available only returns 1 +func NumCores() int { + return numCores } -// IndexCPU returns the current CPU index. -func IndexCPU() int { - if !cpusRead { - return 0 // Likely not linux and nothing available in procinfo. +// CPUCore returns the current CPU core. +func CPUCore() int { + if numCores == 1 { + // Likely not linux and nothing available in procinfo meaning that + // even if RDTSCP is available we won't have setup correct number + // of cores, etc for our queues since we probed using NumCores + // and got 1 back. + return 0 } - return cpus[CPU()] + // We know the number of cores, try to call RDTSCP to get the core. 
+ return getCore() } From 0f2d9d4a679950f131627e75763e3f470c89fb8b Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Sun, 21 Jun 2020 12:00:43 -0400 Subject: [PATCH 08/37] Do not check if empty --- src/dbnode/storage/index/convert/convert.go | 39 +++++++++++++++---- .../storage/index/convert/convert_test.go | 28 ++++++------- src/dbnode/storage/index/results.go | 2 +- 3 files changed, 47 insertions(+), 22 deletions(-) diff --git a/src/dbnode/storage/index/convert/convert.go b/src/dbnode/storage/index/convert/convert.go index f85077a8f8..bf0d659bbf 100644 --- a/src/dbnode/storage/index/convert/convert.go +++ b/src/dbnode/storage/index/convert/convert.go @@ -46,6 +46,31 @@ var ( "corrupt data, unable to extract id") ) +// Validate returns a bool indicating whether the document is valid. +func Validate(d doc.Document) error { + if !utf8.Valid(d.ID) { + return fmt.Errorf("document has invalid ID: id=%v, id_hex=%x", d.ID, d.ID) + } + + for _, f := range d.Fields { + if !utf8.Valid(f.Name) { + return fmt.Errorf("document has invalid field name: name=%v, name_hex=%x", + f.Name, f.Name) + } + + if bytes.Equal(f.Name, ReservedFieldNameID) { + return ErrUsingReservedFieldName + } + + if !utf8.Valid(f.Value) { + return fmt.Errorf("document has invalid field value: value=%v, value_hex=%x", + f.Value, f.Value) + } + } + + return nil +} + // ValidateSeries will validate a series for use with m3ninx. func ValidateSeries(id ident.ID, tags ident.Tags) error { if idBytes := id.Bytes(); !utf8.Valid(idBytes) { @@ -108,7 +133,7 @@ func FromSeriesIDAndTags(id ident.ID, tags ident.Tags) (doc.Document, error) { ID: clonedID, Fields: fields, } - if err := d.Validate(); err != nil { + if err := Validate(d); err != nil { return doc.Document{}, err } return d, nil @@ -147,7 +172,7 @@ func FromSeriesIDAndTagIter(id ident.ID, tags ident.TagIterator) (doc.Document, ID: clonedID, Fields: fields, } - if err := d.Validate(); err != nil { + if err := Validate(d); err != nil { return doc.Document{}, err } return d, nil @@ -249,16 +274,16 @@ func (o Opts) wrapBytes(b []byte) ident.ID { return id } -// ToMetric converts the provided doc to metric id+tags. -func ToMetric(d doc.Document, opts Opts) (ident.ID, ident.TagIterator, error) { +// ToSeries converts the provided doc to metric id+tags. +func ToSeries(d doc.Document, opts Opts) (ident.ID, ident.TagIterator, error) { if len(d.ID) == 0 { return nil, nil, errInvalidResultMissingID } - return opts.wrapBytes(d.ID), ToMetricTags(d, opts), nil + return opts.wrapBytes(d.ID), ToSeriesTags(d, opts), nil } -// ToMetricTags converts the provided doc to metric tags. -func ToMetricTags(d doc.Document, opts Opts) ident.TagIterator { +// ToSeriesTags converts the provided doc to metric tags. 
+func ToSeriesTags(d doc.Document, opts Opts) ident.TagIterator { return newTagIter(d, opts) } diff --git a/src/dbnode/storage/index/convert/convert_test.go b/src/dbnode/storage/index/convert/convert_test.go index 1a04f77f3f..3a3ab1b043 100644 --- a/src/dbnode/storage/index/convert/convert_test.go +++ b/src/dbnode/storage/index/convert/convert_test.go @@ -49,30 +49,30 @@ func init() { testOpts.IdentPool = idPool } -func TestFromMetricInvalid(t *testing.T) { +func TestFromSeriesIDAndTagsInvalid(t *testing.T) { id := ident.StringID("foo") tags := ident.NewTags( ident.StringTag(string(convert.ReservedFieldNameID), "value"), ) - _, err := convert.FromMetric(id, tags) + _, err := convert.FromSeriesIDAndTags(id, tags) assert.Error(t, err) } -func TestFromMetricIteratorInvalid(t *testing.T) { +func TestFromSeriesIDAndTagIteratorInvalid(t *testing.T) { id := ident.StringID("foo") tags := ident.NewTags( ident.StringTag(string(convert.ReservedFieldNameID), "value"), ) - _, err := convert.FromMetricIter(id, ident.NewTagsIterator(tags)) + _, err := convert.FromSeriesIDAndTagIter(id, ident.NewTagsIterator(tags)) assert.Error(t, err) } -func TestFromMetricValid(t *testing.T) { +func TestFromSeriesIDAndTagsValid(t *testing.T) { id := ident.StringID("foo") tags := ident.NewTags( ident.StringTag("bar", "baz"), ) - d, err := convert.FromMetric(id, tags) + d, err := convert.FromSeriesIDAndTags(id, tags) assert.NoError(t, err) assert.Equal(t, "foo", string(d.ID)) assert.Len(t, d.Fields, 1) @@ -80,12 +80,12 @@ func TestFromMetricValid(t *testing.T) { assert.Equal(t, "baz", string(d.Fields[0].Value)) } -func TestFromMetricIterValid(t *testing.T) { +func TestFromSeriesIDAndTagIterValid(t *testing.T) { id := ident.StringID("foo") tags := ident.NewTags( ident.StringTag("bar", "baz"), ) - d, err := convert.FromMetricIter(id, ident.NewTagsIterator(tags)) + d, err := convert.FromSeriesIDAndTagIter(id, ident.NewTagsIterator(tags)) assert.NoError(t, err) assert.Equal(t, "foo", string(d.ID)) assert.Len(t, d.Fields, 1) @@ -93,7 +93,7 @@ func TestFromMetricIterValid(t *testing.T) { assert.Equal(t, "baz", string(d.Fields[0].Value)) } -func TestToMetricValid(t *testing.T) { +func TestToSeriesValid(t *testing.T) { d := doc.Document{ ID: []byte("foo"), Fields: []doc.Field{ @@ -101,7 +101,7 @@ func TestToMetricValid(t *testing.T) { doc.Field{Name: []byte("some"), Value: []byte("others")}, }, } - id, tags, err := convert.ToMetric(d, testOpts) + id, tags, err := convert.ToSeries(d, testOpts) assert.NoError(t, err) assert.Equal(t, 2, tags.Remaining()) assert.Equal(t, "foo", id.String()) @@ -139,24 +139,24 @@ func TestTagsFromTagsIterNoPool(t *testing.T) { require.True(t, true, expectedTags.Equal(tags)) } -func TestToMetricInvalidID(t *testing.T) { +func TestToSeriesInvalidID(t *testing.T) { d := doc.Document{ Fields: []doc.Field{ doc.Field{Name: []byte("bar"), Value: []byte("baz")}, }, } - _, _, err := convert.ToMetric(d, testOpts) + _, _, err := convert.ToSeries(d, testOpts) assert.Error(t, err) } -func TestToMetricInvalidTag(t *testing.T) { +func TestToSeriesInvalidTag(t *testing.T) { d := doc.Document{ ID: []byte("foo"), Fields: []doc.Field{ doc.Field{Name: convert.ReservedFieldNameID, Value: []byte("baz")}, }, } - _, tags, err := convert.ToMetric(d, testOpts) + _, tags, err := convert.ToSeries(d, testOpts) assert.NoError(t, err) assert.False(t, tags.Next()) assert.Error(t, tags.Err()) diff --git a/src/dbnode/storage/index/results.go b/src/dbnode/storage/index/results.go index 254083a8b0..bc1d0a9135 100644 --- 
a/src/dbnode/storage/index/results.go +++ b/src/dbnode/storage/index/results.go @@ -143,7 +143,7 @@ func (r *results) addDocumentWithLock(d doc.Document) (bool, int, error) { // i.e. it doesn't exist in the map, so we create the tags wrapping // fields provided by the document. - tags := convert.ToMetricTags(d, convert.Opts{NoClone: true}) + tags := convert.ToSeriesTags(d, convert.Opts{NoClone: true}) // It is assumed that the document is valid for the lifetime of the index // results. From 50c9dac589473308f484a0d7cccfafe68f7adb33 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Sun, 21 Jun 2020 19:10:40 -0400 Subject: [PATCH 09/37] Fix metadata creation when inserting into shard results --- go.mod | 2 +- go.sum | 7 +- .../services/m3dbnode/config/config_mock.go | 20 ++ src/dbnode/client/client_mock.go | 14 +- src/dbnode/digest/digest_mock.go | 29 +- src/dbnode/encoding/encoding_mock.go | 104 +++--- src/dbnode/integration/write_quorum_test.go | 3 + src/dbnode/persist/fs/fs_mock.go | 20 ++ src/dbnode/persist/persist_mock.go | 29 +- .../storage/bootstrap/bootstrap_mock.go | 40 ++- src/dbnode/storage/database_test.go | 126 +++++--- .../storage/dirty_series_new_map_gen.go | 2 +- src/dbnode/storage/fs_merge_with_mem_test.go | 33 +- src/dbnode/storage/index_insert_queue.go | 102 +++--- .../storage/index_queue_forward_write_test.go | 4 +- src/dbnode/storage/namespace_test.go | 20 +- src/dbnode/storage/series/buffer_mock.go | 60 ++-- .../storage/series/lookup/lookup_mock.go | 20 ++ src/dbnode/storage/series/series_mock.go | 20 ++ src/dbnode/storage/series/series_test.go | 6 +- src/dbnode/storage/shard.go | 6 +- src/dbnode/storage/shard_index_test.go | 44 +-- src/dbnode/storage/shard_race_prop_test.go | 8 +- src/dbnode/storage/shard_ref_count_test.go | 100 +++--- src/dbnode/storage/shard_test.go | 10 +- src/dbnode/storage/storage_mock.go | 285 ++++++++++------- src/dbnode/ts/writes/write_batch_mock.go | 300 ++++++++++++++++++ src/dbnode/x/xio/io_mock.go | 20 ++ 28 files changed, 1020 insertions(+), 414 deletions(-) create mode 100644 src/dbnode/ts/writes/write_batch_mock.go diff --git a/go.mod b/go.mod index c922b1d4c0..d2fb483c4d 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/go-playground/locales v0.12.2-0.20190430153329-630ebbb60284 // indirect github.com/go-playground/universal-translator v0.16.1-0.20170327191703-71201497bace // indirect github.com/gogo/protobuf v1.3.1 - github.com/golang/mock v1.3.1-0.20190508161146-9fa652df1129 + github.com/golang/mock v1.4.3 github.com/golang/protobuf v1.3.3 github.com/golang/snappy v0.0.1 github.com/google/go-cmp v0.4.0 diff --git a/go.sum b/go.sum index 6bc91b4d83..8c9347929b 100644 --- a/go.sum +++ b/go.sum @@ -260,8 +260,8 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1-0.20190508161146-9fa652df1129 h1:tT8iWCYw4uOem71yYA3htfH+LNopJvcqZQshm56G5L4= -github.com/golang/mock v1.3.1-0.20190508161146-9fa652df1129/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf 
v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -904,6 +904,7 @@ golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= @@ -1033,6 +1034,8 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/src/cmd/services/m3dbnode/config/config_mock.go b/src/cmd/services/m3dbnode/config/config_mock.go index 43c0512d6e..b0e94f8b4d 100644 --- a/src/cmd/services/m3dbnode/config/config_mock.go +++ b/src/cmd/services/m3dbnode/config/config_mock.go @@ -1,6 +1,26 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/m3db/m3/src/cmd/services/m3dbnode/config (interfaces: BootstrapConfigurationValidator) +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + // Package config is a generated GoMock package. 
package config diff --git a/src/dbnode/client/client_mock.go b/src/dbnode/client/client_mock.go index 5b35ebd1d9..9da52ea71a 100644 --- a/src/dbnode/client/client_mock.go +++ b/src/dbnode/client/client_mock.go @@ -48,7 +48,7 @@ import ( time0 "github.com/m3db/m3/src/x/time" "github.com/golang/mock/gomock" - tchannel_go "github.com/uber/tchannel-go" + tchannel "github.com/uber/tchannel-go" ) // MockClient is a mock of Client interface @@ -1301,7 +1301,7 @@ func (mr *MockOptionsMockRecorder) WriteConsistencyLevel() *gomock.Call { } // SetChannelOptions mocks base method -func (m *MockOptions) SetChannelOptions(value *tchannel_go.ChannelOptions) Options { +func (m *MockOptions) SetChannelOptions(value *tchannel.ChannelOptions) Options { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetChannelOptions", value) ret0, _ := ret[0].(Options) @@ -1315,10 +1315,10 @@ func (mr *MockOptionsMockRecorder) SetChannelOptions(value interface{}) *gomock. } // ChannelOptions mocks base method -func (m *MockOptions) ChannelOptions() *tchannel_go.ChannelOptions { +func (m *MockOptions) ChannelOptions() *tchannel.ChannelOptions { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChannelOptions") - ret0, _ := ret[0].(*tchannel_go.ChannelOptions) + ret0, _ := ret[0].(*tchannel.ChannelOptions) return ret0 } @@ -2780,7 +2780,7 @@ func (mr *MockAdminOptionsMockRecorder) WriteConsistencyLevel() *gomock.Call { } // SetChannelOptions mocks base method -func (m *MockAdminOptions) SetChannelOptions(value *tchannel_go.ChannelOptions) Options { +func (m *MockAdminOptions) SetChannelOptions(value *tchannel.ChannelOptions) Options { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetChannelOptions", value) ret0, _ := ret[0].(Options) @@ -2794,10 +2794,10 @@ func (mr *MockAdminOptionsMockRecorder) SetChannelOptions(value interface{}) *go } // ChannelOptions mocks base method -func (m *MockAdminOptions) ChannelOptions() *tchannel_go.ChannelOptions { +func (m *MockAdminOptions) ChannelOptions() *tchannel.ChannelOptions { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChannelOptions") - ret0, _ := ret[0].(*tchannel_go.ChannelOptions) + ret0, _ := ret[0].(*tchannel.ChannelOptions) return ret0 } diff --git a/src/dbnode/digest/digest_mock.go b/src/dbnode/digest/digest_mock.go index 31a44d7235..8892e8bb4d 100644 --- a/src/dbnode/digest/digest_mock.go +++ b/src/dbnode/digest/digest_mock.go @@ -1,14 +1,35 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/m3db/m3/src/dbnode/digest (interfaces: ReaderWithDigest) +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + // Package digest is a generated GoMock package. package digest import ( - gomock "github.com/golang/mock/gomock" - hash "hash" - io "io" - reflect "reflect" + "hash" + "io" + "reflect" + + "github.com/golang/mock/gomock" ) // MockReaderWithDigest is a mock of ReaderWithDigest interface diff --git a/src/dbnode/encoding/encoding_mock.go b/src/dbnode/encoding/encoding_mock.go index 08ca8cefac..fa61de0683 100644 --- a/src/dbnode/encoding/encoding_mock.go +++ b/src/dbnode/encoding/encoding_mock.go @@ -1,24 +1,46 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: /Users/r/go/src/github.com/m3db/m3/src/dbnode/encoding/types.go +// Source: github.com/m3db/m3/src/dbnode/encoding/types.go + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. // Package encoding is a generated GoMock package. 
package encoding import ( - gomock "github.com/golang/mock/gomock" - namespace "github.com/m3db/m3/src/dbnode/namespace" - ts "github.com/m3db/m3/src/dbnode/ts" - xio "github.com/m3db/m3/src/dbnode/x/xio" - xpool "github.com/m3db/m3/src/dbnode/x/xpool" - checked "github.com/m3db/m3/src/x/checked" - context "github.com/m3db/m3/src/x/context" - ident "github.com/m3db/m3/src/x/ident" - pool "github.com/m3db/m3/src/x/pool" - serialize "github.com/m3db/m3/src/x/serialize" - time "github.com/m3db/m3/src/x/time" - io "io" - reflect "reflect" - time0 "time" + "io" + "reflect" + "time" + + "github.com/m3db/m3/src/dbnode/namespace" + "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/x/xio" + "github.com/m3db/m3/src/dbnode/x/xpool" + "github.com/m3db/m3/src/x/checked" + "github.com/m3db/m3/src/x/context" + "github.com/m3db/m3/src/x/ident" + "github.com/m3db/m3/src/x/pool" + "github.com/m3db/m3/src/x/serialize" + time0 "github.com/m3db/m3/src/x/time" + + "github.com/golang/mock/gomock" ) // MockEncoder is a mock of Encoder interface @@ -57,7 +79,7 @@ func (mr *MockEncoderMockRecorder) SetSchema(descr interface{}) *gomock.Call { } // Encode mocks base method -func (m *MockEncoder) Encode(dp ts.Datapoint, unit time.Unit, annotation ts.Annotation) error { +func (m *MockEncoder) Encode(dp ts.Datapoint, unit time0.Unit, annotation ts.Annotation) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Encode", dp, unit, annotation) ret0, _ := ret[0].(error) @@ -144,7 +166,7 @@ func (mr *MockEncoderMockRecorder) Len() *gomock.Call { } // Reset mocks base method -func (m *MockEncoder) Reset(t time0.Time, capacity int, schema namespace.SchemaDescr) { +func (m *MockEncoder) Reset(t time.Time, capacity int, schema namespace.SchemaDescr) { m.ctrl.T.Helper() m.ctrl.Call(m, "Reset", t, capacity, schema) } @@ -182,7 +204,7 @@ func (mr *MockEncoderMockRecorder) Discard() *gomock.Call { } // DiscardReset mocks base method -func (m *MockEncoder) DiscardReset(t time0.Time, capacity int, schema namespace.SchemaDescr) ts.Segment { +func (m *MockEncoder) DiscardReset(t time.Time, capacity int, schema namespace.SchemaDescr) ts.Segment { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DiscardReset", t, capacity, schema) ret0, _ := ret[0].(ts.Segment) @@ -219,7 +241,7 @@ func (m *MockOptions) EXPECT() *MockOptionsMockRecorder { } // SetDefaultTimeUnit mocks base method -func (m *MockOptions) SetDefaultTimeUnit(tu time.Unit) Options { +func (m *MockOptions) SetDefaultTimeUnit(tu time0.Unit) Options { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetDefaultTimeUnit", tu) ret0, _ := ret[0].(Options) @@ -233,10 +255,10 @@ func (mr *MockOptionsMockRecorder) SetDefaultTimeUnit(tu interface{}) *gomock.Ca } // DefaultTimeUnit mocks base method -func (m *MockOptions) DefaultTimeUnit() time.Unit { +func (m *MockOptions) DefaultTimeUnit() time0.Unit { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DefaultTimeUnit") - ret0, _ := ret[0].(time.Unit) + ret0, _ := ret[0].(time0.Unit) return ret0 } @@ -247,7 +269,7 @@ func (mr *MockOptionsMockRecorder) DefaultTimeUnit() *gomock.Call { } // SetTimeEncodingSchemes mocks base method -func (m *MockOptions) SetTimeEncodingSchemes(value map[time.Unit]TimeEncodingScheme) Options { +func (m *MockOptions) SetTimeEncodingSchemes(value map[time0.Unit]TimeEncodingScheme) Options { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetTimeEncodingSchemes", value) ret0, _ := ret[0].(Options) @@ -564,11 +586,11 @@ func (mr *MockIteratorMockRecorder) Next() *gomock.Call { } // Current mocks base method -func (m 
*MockIterator) Current() (ts.Datapoint, time.Unit, ts.Annotation) { +func (m *MockIterator) Current() (ts.Datapoint, time0.Unit, ts.Annotation) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Current") ret0, _ := ret[0].(ts.Datapoint) - ret1, _ := ret[1].(time.Unit) + ret1, _ := ret[1].(time0.Unit) ret2, _ := ret[2].(ts.Annotation) return ret0, ret1, ret2 } @@ -643,11 +665,11 @@ func (mr *MockReaderIteratorMockRecorder) Next() *gomock.Call { } // Current mocks base method -func (m *MockReaderIterator) Current() (ts.Datapoint, time.Unit, ts.Annotation) { +func (m *MockReaderIterator) Current() (ts.Datapoint, time0.Unit, ts.Annotation) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Current") ret0, _ := ret[0].(ts.Datapoint) - ret1, _ := ret[1].(time.Unit) + ret1, _ := ret[1].(time0.Unit) ret2, _ := ret[2].(ts.Annotation) return ret0, ret1, ret2 } @@ -734,11 +756,11 @@ func (mr *MockMultiReaderIteratorMockRecorder) Next() *gomock.Call { } // Current mocks base method -func (m *MockMultiReaderIterator) Current() (ts.Datapoint, time.Unit, ts.Annotation) { +func (m *MockMultiReaderIterator) Current() (ts.Datapoint, time0.Unit, ts.Annotation) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Current") ret0, _ := ret[0].(ts.Datapoint) - ret1, _ := ret[1].(time.Unit) + ret1, _ := ret[1].(time0.Unit) ret2, _ := ret[2].(ts.Annotation) return ret0, ret1, ret2 } @@ -776,7 +798,7 @@ func (mr *MockMultiReaderIteratorMockRecorder) Close() *gomock.Call { } // Reset mocks base method -func (m *MockMultiReaderIterator) Reset(readers []xio.SegmentReader, start time0.Time, blockSize time0.Duration, schema namespace.SchemaDescr) { +func (m *MockMultiReaderIterator) Reset(readers []xio.SegmentReader, start time.Time, blockSize time.Duration, schema namespace.SchemaDescr) { m.ctrl.T.Helper() m.ctrl.Call(m, "Reset", readers, start, blockSize, schema) } @@ -865,11 +887,11 @@ func (mr *MockSeriesIteratorAccumulatorMockRecorder) Next() *gomock.Call { } // Current mocks base method -func (m *MockSeriesIteratorAccumulator) Current() (ts.Datapoint, time.Unit, ts.Annotation) { +func (m *MockSeriesIteratorAccumulator) Current() (ts.Datapoint, time0.Unit, ts.Annotation) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Current") ret0, _ := ret[0].(ts.Datapoint) - ret1, _ := ret[1].(time.Unit) + ret1, _ := ret[1].(time0.Unit) ret2, _ := ret[2].(ts.Annotation) return ret0, ret1, ret2 } @@ -935,10 +957,10 @@ func (mr *MockSeriesIteratorAccumulatorMockRecorder) Namespace() *gomock.Call { } // Start mocks base method -func (m *MockSeriesIteratorAccumulator) Start() time0.Time { +func (m *MockSeriesIteratorAccumulator) Start() time.Time { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Start") - ret0, _ := ret[0].(time0.Time) + ret0, _ := ret[0].(time.Time) return ret0 } @@ -949,10 +971,10 @@ func (mr *MockSeriesIteratorAccumulatorMockRecorder) Start() *gomock.Call { } // End mocks base method -func (m *MockSeriesIteratorAccumulator) End() time0.Time { +func (m *MockSeriesIteratorAccumulator) End() time.Time { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "End") - ret0, _ := ret[0].(time0.Time) + ret0, _ := ret[0].(time.Time) return ret0 } @@ -1082,11 +1104,11 @@ func (mr *MockSeriesIteratorMockRecorder) Next() *gomock.Call { } // Current mocks base method -func (m *MockSeriesIterator) Current() (ts.Datapoint, time.Unit, ts.Annotation) { +func (m *MockSeriesIterator) Current() (ts.Datapoint, time0.Unit, ts.Annotation) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Current") ret0, _ := ret[0].(ts.Datapoint) - ret1, _ := ret[1].(time.Unit) + ret1, _ := 
ret[1].(time0.Unit) ret2, _ := ret[2].(ts.Annotation) return ret0, ret1, ret2 } @@ -1152,10 +1174,10 @@ func (mr *MockSeriesIteratorMockRecorder) Namespace() *gomock.Call { } // Start mocks base method -func (m *MockSeriesIterator) Start() time0.Time { +func (m *MockSeriesIterator) Start() time.Time { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Start") - ret0, _ := ret[0].(time0.Time) + ret0, _ := ret[0].(time.Time) return ret0 } @@ -1166,10 +1188,10 @@ func (mr *MockSeriesIteratorMockRecorder) Start() *gomock.Call { } // End mocks base method -func (m *MockSeriesIterator) End() time0.Time { +func (m *MockSeriesIterator) End() time.Time { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "End") - ret0, _ := ret[0].(time0.Time) + ret0, _ := ret[0].(time.Time) return ret0 } diff --git a/src/dbnode/integration/write_quorum_test.go b/src/dbnode/integration/write_quorum_test.go index 82f7cd96ec..b4a08de153 100644 --- a/src/dbnode/integration/write_quorum_test.go +++ b/src/dbnode/integration/write_quorum_test.go @@ -214,6 +214,9 @@ func TestAddNodeQuorumAllUp(t *testing.T) { assert.NoError(t, testWrite(topology.ConsistencyLevelOne)) assert.NoError(t, testWrite(topology.ConsistencyLevelMajority)) assert.Error(t, testWrite(topology.ConsistencyLevelAll)) + + // debug + time.Sleep(10 * time.Minute) } type testWriteFn func(topology.ConsistencyLevel) error diff --git a/src/dbnode/persist/fs/fs_mock.go b/src/dbnode/persist/fs/fs_mock.go index 1ca3c03f55..084b8e2afc 100644 --- a/src/dbnode/persist/fs/fs_mock.go +++ b/src/dbnode/persist/fs/fs_mock.go @@ -1,6 +1,26 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/m3db/m3/src/dbnode/persist/fs (interfaces: DataFileSetWriter,DataFileSetReader,DataFileSetSeeker,IndexFileSetWriter,IndexFileSetReader,IndexSegmentFileSetWriter,IndexSegmentFileSet,IndexSegmentFile,SnapshotMetadataFileWriter,DataFileSetSeekerManager,ConcurrentDataFileSetSeeker,MergeWith) +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + // Package fs is a generated GoMock package. package fs diff --git a/src/dbnode/persist/persist_mock.go b/src/dbnode/persist/persist_mock.go index a9bf749a9f..1ec52dcd02 100644 --- a/src/dbnode/persist/persist_mock.go +++ b/src/dbnode/persist/persist_mock.go @@ -1,13 +1,34 @@ // Code generated by MockGen. DO NOT EDIT. 
-// Source: /Users/r/go/src/github.com/m3db/m3/src/dbnode/persist/types.go +// Source: github.com/m3db/m3/src/dbnode/persist/types.go + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. // Package persist is a generated GoMock package. package persist import ( - gomock "github.com/golang/mock/gomock" - uuid "github.com/pborman/uuid" - reflect "reflect" + "reflect" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" ) // MockManager is a mock of Manager interface diff --git a/src/dbnode/storage/bootstrap/bootstrap_mock.go b/src/dbnode/storage/bootstrap/bootstrap_mock.go index 81e1493955..f3e2297922 100644 --- a/src/dbnode/storage/bootstrap/bootstrap_mock.go +++ b/src/dbnode/storage/bootstrap/bootstrap_mock.go @@ -1,18 +1,40 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: /Users/r/go/src/github.com/m3db/m3/src/dbnode/storage/bootstrap/types.go +// Source: github.com/m3db/m3/src/dbnode/storage/bootstrap/types.go + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. // Package bootstrap is a generated GoMock package. 
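The regenerated mocks above all follow the same goimports grouping, standard library first, then m3 packages, then gomock, and rewrite the mock Source path from an absolute /Users/r/... path to the package path. A side effect of the regrouping is that the aliases flip: the standard library now keeps the bare time name while m3's x/time package becomes time0, where previously it was the reverse. A minimal sketch of how the two packages now read side by side in a signature, assuming the m3 module is available and using an invented helper name describeWrite, is:

package example

import (
	"fmt"
	"time"

	xtime "github.com/m3db/m3/src/x/time"
)

// In the regenerated files the bare "time" identifier is the standard
// library and "time0" is mockgen's alias for x/time, so a mocked method
// such as Write(..., timestamp time.Time, ..., unit time0.Unit, ...) mixes
// a wall-clock timestamp with an m3 resolution unit.
func describeWrite(timestamp time.Time, unit xtime.Unit) string {
	return fmt.Sprintf("write at %v with unit %v", timestamp.Truncate(time.Second), unit)
}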
package bootstrap import ( - gomock "github.com/golang/mock/gomock" - namespace "github.com/m3db/m3/src/dbnode/namespace" - result "github.com/m3db/m3/src/dbnode/storage/bootstrap/result" - topology "github.com/m3db/m3/src/dbnode/topology" - context "github.com/m3db/m3/src/x/context" - ident "github.com/m3db/m3/src/x/ident" - reflect "reflect" - time "time" + "reflect" + "time" + + "github.com/m3db/m3/src/dbnode/namespace" + "github.com/m3db/m3/src/dbnode/storage/bootstrap/result" + "github.com/m3db/m3/src/dbnode/topology" + "github.com/m3db/m3/src/x/context" + "github.com/m3db/m3/src/x/ident" + + "github.com/golang/mock/gomock" ) // MockProcessProvider is a mock of ProcessProvider interface diff --git a/src/dbnode/storage/database_test.go b/src/dbnode/storage/database_test.go index 1b06cc6725..46527674d7 100644 --- a/src/dbnode/storage/database_test.go +++ b/src/dbnode/storage/database_test.go @@ -42,6 +42,7 @@ import ( "github.com/m3db/m3/src/dbnode/topology" "github.com/m3db/m3/src/dbnode/tracepoint" "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/ts/writes" xmetrics "github.com/m3db/m3/src/dbnode/x/metrics" "github.com/m3db/m3/src/m3ninx/idx" xclock "github.com/m3db/m3/src/x/clock" @@ -789,13 +790,16 @@ func testDatabaseNamespaceIndexFunctions(t *testing.T, commitlogEnabled bool) { require.NoError(t, d.Open()) var ( - namespace = ident.StringID("testns") - ctx = context.NewContext() - id = ident.StringID("foo") - tagsIter = ident.EmptyTagIterator - s = ts.Series{ - ID: id, - Namespace: namespace, + namespace = ident.StringID("testns") + ctx = context.NewContext() + id = ident.StringID("foo") + tagsIter = ident.EmptyTagIterator + seriesWrite = SeriesWrite{ + Series: ts.Series{ + ID: id, + Namespace: namespace, + }, + WasWritten: true, } ) @@ -805,13 +809,13 @@ func testDatabaseNamespaceIndexFunctions(t *testing.T, commitlogEnabled bool) { ctx.SetGoContext(opentracing.ContextWithSpan(stdlibctx.Background(), sp)) ns.EXPECT().WriteTagged(gomock.Any(), ident.NewIDMatcher("foo"), gomock.Any(), - time.Time{}, 1.0, xtime.Second, nil).Return(s, true, nil) + time.Time{}, 1.0, xtime.Second, nil).Return(seriesWrite, nil) require.NoError(t, d.WriteTagged(ctx, namespace, id, tagsIter, time.Time{}, 1.0, xtime.Second, nil)) ns.EXPECT().WriteTagged(gomock.Any(), ident.NewIDMatcher("foo"), gomock.Any(), - time.Time{}, 1.0, xtime.Second, nil).Return(s, false, fmt.Errorf("random err")) + time.Time{}, 1.0, xtime.Second, nil).Return(SeriesWrite{}, fmt.Errorf("random err")) require.Error(t, d.WriteTagged(ctx, namespace, ident.StringID("foo"), ident.EmptyTagIterator, time.Time{}, 1.0, xtime.Second, nil)) @@ -993,7 +997,7 @@ func testDatabaseWriteBatch(t *testing.T, encodedTags, ok := encoder.Data() require.True(t, ok) - writes := []struct { + testWrites := []struct { series string t time.Time v float64 @@ -1046,7 +1050,7 @@ func testDatabaseWriteBatch(t *testing.T, require.NoError(t, err) var i int - for _, write := range writes { + for _, write := range testWrites { // Write with the provided index as i*2 so we can assert later that the // ErrorHandler is called with the provided index, not the actual position // in the WriteBatch slice. 
@@ -1054,32 +1058,39 @@ func testDatabaseWriteBatch(t *testing.T, batchWriter.AddTagged(i*2, ident.StringID(write.series), tagsIter.Duplicate(), encodedTags.Bytes(), write.t, write.v, xtime.Second, nil) wasWritten := write.err == nil - ns.EXPECT().WriteTagged(ctx, ident.NewIDMatcher(write.series), gomock.Any(), - write.t, write.v, xtime.Second, nil).Return( - ts.Series{ - ID: ident.StringID(write.series + "-updated"), - Namespace: namespace, - }, wasWritten, write.err) + ns.EXPECT(). + WriteTagged(ctx, ident.NewIDMatcher(write.series), gomock.Any(), + write.t, write.v, xtime.Second, nil). + Return(SeriesWrite{ + Series: ts.Series{ + ID: ident.StringID(write.series + "-updated"), + Namespace: namespace, + }, WasWritten: wasWritten, + }, write.err) } else { batchWriter.Add(i*2, ident.StringID(write.series), write.t, write.v, xtime.Second, nil) wasWritten := write.err == nil - ns.EXPECT().Write(ctx, ident.NewIDMatcher(write.series), - write.t, write.v, xtime.Second, nil).Return( - ts.Series{ - ID: ident.StringID(write.series + "-updated"), - Namespace: namespace, - }, wasWritten, write.err) + ns.EXPECT(). + Write(ctx, ident.NewIDMatcher(write.series), + write.t, write.v, xtime.Second, nil). + Return(SeriesWrite{ + Series: ts.Series{ + ID: ident.StringID(write.series + "-updated"), + Namespace: namespace, + }, + WasWritten: wasWritten, + }, write.err) } i++ } errHandler := &fakeIndexedErrorHandler{} if tagged { - err = d.WriteTaggedBatch(ctx, namespace, batchWriter.(ts.WriteBatch), + err = d.WriteTaggedBatch(ctx, namespace, batchWriter.(writes.WriteBatch), errHandler) } else { - err = d.WriteBatch(ctx, namespace, batchWriter.(ts.WriteBatch), + err = d.WriteBatch(ctx, namespace, batchWriter.(writes.WriteBatch), errHandler) } @@ -1194,45 +1205,56 @@ func TestUpdateBatchWriterBasedOnShardResults(t *testing.T) { require.NoError(t, d.Open()) var ( - namespace = ident.StringID("testns") - ctx = context.NewContext() - series1 = ts.Series{UniqueIndex: 0} - series2 = ts.Series{UniqueIndex: 1} - series3 = ts.Series{UniqueIndex: 2} - series4 = ts.Series{UniqueIndex: 3} - err = fmt.Errorf("err") + namespace = ident.StringID("testns") + ctx = context.NewContext() + seriesWrite1 = SeriesWrite{Series: ts.Series{UniqueIndex: 0}, WasWritten: true} + seriesWrite2 = SeriesWrite{Series: ts.Series{UniqueIndex: 1}, WasWritten: true} + seriesWrite3 = SeriesWrite{Series: ts.Series{UniqueIndex: 2}, WasWritten: false} + seriesWrite4 = SeriesWrite{Series: ts.Series{UniqueIndex: 3}, WasWritten: false} + err = fmt.Errorf("err") ) - ns.EXPECT().Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(), - gomock.Any(), gomock.Any()).Return(series1, true, nil) - ns.EXPECT().Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(), - gomock.Any(), gomock.Any()).Return(series2, true, err) - ns.EXPECT().Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(), - gomock.Any(), gomock.Any()).Return(series3, false, err) - ns.EXPECT().Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(), - gomock.Any(), gomock.Any()).Return(series4, false, nil) + gomock.InOrder( + ns.EXPECT(). + Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()). + Return(seriesWrite1, nil), + ns.EXPECT(). + Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()). + Return(seriesWrite2, err), + ns.EXPECT(). + Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()). + Return(seriesWrite3, err), + ns.EXPECT(). + Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()). 
+ Return(seriesWrite4, nil), + ) - write := ts.Write{ + write := writes.Write{ Series: ts.Series{ID: ident.StringID("foo")}, } - iters := []ts.BatchWrite{ + iters := []writes.BatchWrite{ {Write: write}, {Write: write}, {Write: write}, {Write: write}, } - batchWriter := ts.NewMockWriteBatch(ctrl) - batchWriter.EXPECT().Iter().Return(iters) - batchWriter.EXPECT().Finalize().Times(1) - batchWriter.EXPECT().SetOutcome(0, series1, nil) - batchWriter.EXPECT().SetOutcome(1, series2, err) - batchWriter.EXPECT().SetSkipWrite(1) - batchWriter.EXPECT().SetOutcome(2, series3, err) - batchWriter.EXPECT().SetSkipWrite(2) - batchWriter.EXPECT().SetOutcome(3, series4, nil) - batchWriter.EXPECT().SetSkipWrite(3) + batchWriter := writes.NewMockWriteBatch(ctrl) + gomock.InOrder( + batchWriter.EXPECT().Iter().Return(iters), + batchWriter.EXPECT().SetSeries(0, seriesWrite1.Series), + batchWriter.EXPECT().SetError(1, err), + batchWriter.EXPECT().SetError(2, err), + batchWriter.EXPECT().SetSeries(3, seriesWrite4.Series), + batchWriter.EXPECT().SetSkipWrite(3), + batchWriter.EXPECT().PendingIndex().Return(nil), + batchWriter.EXPECT().Finalize(), + ) errHandler := &fakeIndexedErrorHandler{} d.WriteBatch(ctx, namespace, batchWriter, errHandler) diff --git a/src/dbnode/storage/dirty_series_new_map_gen.go b/src/dbnode/storage/dirty_series_new_map_gen.go index 72721800fc..e72c083b6e 100644 --- a/src/dbnode/storage/dirty_series_new_map_gen.go +++ b/src/dbnode/storage/dirty_series_new_map_gen.go @@ -49,7 +49,7 @@ func newDirtySeriesMap(opts dirtySeriesMapOptions) *dirtySeriesMap { }, copy: func(k idAndBlockStart) idAndBlockStart { return idAndBlockStart{ - id: k.id, + id: append(make([]byte, 0, len(k.id)), k.id...), blockStart: k.blockStart, } }, diff --git a/src/dbnode/storage/fs_merge_with_mem_test.go b/src/dbnode/storage/fs_merge_with_mem_test.go index de8f599bb9..1e64378245 100644 --- a/src/dbnode/storage/fs_merge_with_mem_test.go +++ b/src/dbnode/storage/fs_merge_with_mem_test.go @@ -31,6 +31,7 @@ import ( "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" + xtest "github.com/m3db/m3/src/x/test" xtime "github.com/m3db/m3/src/x/time" "github.com/golang/mock/gomock" @@ -44,7 +45,7 @@ type dirtyData struct { } func TestRead(t *testing.T) { - ctrl := gomock.NewController(t) + ctrl := xtest.NewController(t) defer ctrl.Finish() shard := NewMockdatabaseShard(ctrl) @@ -127,7 +128,7 @@ func TestRead(t *testing.T) { } func TestForEachRemaining(t *testing.T) { - ctrl := gomock.NewController(t) + ctrl := xtest.NewController(t) defer ctrl.Finish() shard := NewMockdatabaseShard(ctrl) @@ -173,10 +174,12 @@ func TestForEachRemaining(t *testing.T) { var forEachCalls []doc.Document shard.EXPECT(). - FetchBlocksForColdFlush(gomock.Any(), id0, xtime.UnixNano(0).ToTime(), version+1, gomock.Any()). + FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id0"), + xtime.UnixNano(0).ToTime(), version+1, gomock.Any()). Return(result, nil) shard.EXPECT(). - FetchBlocksForColdFlush(gomock.Any(), id1, xtime.UnixNano(0).ToTime(), version+1, gomock.Any()). + FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id1"), + xtime.UnixNano(0).ToTime(), version+1, gomock.Any()). 
Return(result, nil) mergeWith.ForEachRemaining(ctx, 0, func(seriesMetadata doc.Document, result block.FetchBlockResult) error { forEachCalls = append(forEachCalls, seriesMetadata) @@ -191,17 +194,20 @@ func TestForEachRemaining(t *testing.T) { // Read id3 at block start 1, so id2 and id4 should be remaining for block // start 1. shard.EXPECT(). - FetchBlocksForColdFlush(gomock.Any(), id3, xtime.UnixNano(1).ToTime(), version+1, nsCtx). + FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id3"), + xtime.UnixNano(1).ToTime(), version+1, nsCtx). Return(result, nil) res, exists, err := mergeWith.Read(ctx, id3, 1, nsCtx) require.NoError(t, err) assert.True(t, exists) assert.Equal(t, result.Blocks, res) shard.EXPECT(). - FetchBlocksForColdFlush(gomock.Any(), id2, xtime.UnixNano(1).ToTime(), version+1, gomock.Any()). + FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id2"), + xtime.UnixNano(1).ToTime(), version+1, gomock.Any()). Return(result, nil) shard.EXPECT(). - FetchBlocksForColdFlush(gomock.Any(), id4, xtime.UnixNano(1).ToTime(), version+1, gomock.Any()). + FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id4"), + xtime.UnixNano(1).ToTime(), version+1, gomock.Any()). Return(result, nil) err = mergeWith.ForEachRemaining(ctx, 1, func(seriesMetadata doc.Document, result block.FetchBlockResult) error { forEachCalls = append(forEachCalls, seriesMetadata) @@ -209,18 +215,13 @@ func TestForEachRemaining(t *testing.T) { }, nsCtx) require.NoError(t, err) require.Len(t, forEachCalls, 2) - assert.Equal(t, id2, forEachCalls[0]) - assert.Equal(t, id4, forEachCalls[1]) + assert.Equal(t, id2.Bytes(), forEachCalls[0].ID) + assert.Equal(t, id4.Bytes(), forEachCalls[1].ID) - // Test call with error getting tags. shard.EXPECT(). - FetchBlocksForColdFlush(gomock.Any(), id8, xtime.UnixNano(4).ToTime(), version+1, gomock.Any()). + FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id8"), + xtime.UnixNano(4).ToTime(), version+1, gomock.Any()). Return(result, nil) - err = mergeWith.ForEachRemaining(ctx, 4, func(seriesMetadata doc.Document, result block.FetchBlockResult) error { - // This function won't be called with the above error. - return errors.New("unreachable") - }, nsCtx) - assert.Error(t, err) // Test call with bad function execution. err = mergeWith.ForEachRemaining(ctx, 4, func(seriesMetadata doc.Document, result block.FetchBlockResult) error { diff --git a/src/dbnode/storage/index_insert_queue.go b/src/dbnode/storage/index_insert_queue.go index cd311d7374..70f9053dd5 100644 --- a/src/dbnode/storage/index_insert_queue.go +++ b/src/dbnode/storage/index_insert_queue.go @@ -146,9 +146,6 @@ func (q *nsIndexInsertQueue) insertLoop() { backoff = q.indexBatchBackoff - elapsedSinceLastInsert } q.Unlock() - if state != nsIndexInsertQueueStateOpen { - return // Break if the queue closed - } if backoff > 0 { q.sleepFn(backoff) @@ -165,6 +162,10 @@ func (q *nsIndexInsertQueue) insertLoop() { batchWg.Done() lastInsert = q.nowFn() + + if state != nsIndexInsertQueueStateOpen { + return // Break if the queue closed + } } } @@ -344,56 +345,67 @@ func (b *nsIndexInsertBatch) Rotate(target *nsIndexInsertBatch) *sync.WaitGroup b.wg.Add(1) // Rotate to target if we need to. - if target != nil { - for idx, inserts := range b.insertsByCPUCore { - // First prepare the target to take the current batch's inserts. - targetInserts := target.insertsByCPUCore[idx] - targetInserts.Lock() - - // Reset the target inserts since we'll take ref to them in a second. 
- for i := range targetInserts.shardInserts { - // TODO(prateek): if we start pooling `[]index.WriteBatchEntry`, then we could return to the pool here. - targetInserts.shardInserts[i] = nil - } - prevTargetInserts := targetInserts.shardInserts[:0] - - // memset optimization - var zero writes.PendingIndexInsert - for i := range targetInserts.batchInserts { - targetInserts.batchInserts[i] = zero - } - prevTargetBatchInserts := targetInserts.batchInserts[:0] - - // Lock the current batch inserts now ready to rotate to the target. + + for idx, inserts := range b.insertsByCPUCore { + if target == nil { + // No target to rotate with. inserts.Lock() + // Reset + inserts.shardInserts = inserts.shardInserts[:0] + inserts.batchInserts = inserts.batchInserts[:0] + // Use new wait group. + inserts.wg = b.wg + inserts.Unlock() + continue + } - // Update current slice refs to take target's inserts. - targetInserts.shardInserts = inserts.shardInserts - targetInserts.batchInserts = inserts.batchInserts - targetInserts.wg = inserts.wg + // First prepare the target to take the current batch's inserts. + targetInserts := target.insertsByCPUCore[idx] + targetInserts.Lock() - // Reuse the target's old slices. - inserts.shardInserts = prevTargetInserts - inserts.batchInserts = prevTargetBatchInserts + // Reset the target inserts since we'll take ref to them in a second. + for i := range targetInserts.shardInserts { + // TODO(prateek): if we start pooling `[]index.WriteBatchEntry`, then we could return to the pool here. + targetInserts.shardInserts[i] = nil + } + prevTargetShardInserts := targetInserts.shardInserts[:0] - // Use new wait group. - inserts.wg = b.wg + // memset optimization + var zero writes.PendingIndexInsert + for i := range targetInserts.batchInserts { + targetInserts.batchInserts[i] = zero + } + prevTargetBatchInserts := targetInserts.batchInserts[:0] - // Unlock as early as possible for writes to keep enqueuing. - inserts.Unlock() + // Lock the current batch inserts now ready to rotate to the target. + inserts.Lock() - numTargetInsertsShard := len(targetInserts.shardInserts) - numTargetInsertsPending := len(targetInserts.batchInserts) + // Update current slice refs to take target's inserts. + targetInserts.shardInserts = inserts.shardInserts + targetInserts.batchInserts = inserts.batchInserts + targetInserts.wg = inserts.wg - // Now can unlock target inserts too. - targetInserts.Unlock() + // Reuse the target's old slices. + inserts.shardInserts = prevTargetShardInserts + inserts.batchInserts = prevTargetBatchInserts - if n := numTargetInsertsShard; n > 0 { - inserts.metrics.rotateInsertsShard.Inc(int64(n)) - } - if n := numTargetInsertsPending; n > 0 { - inserts.metrics.rotateInsertsPending.Inc(int64(n)) - } + // Use new wait group. + inserts.wg = b.wg + + // Unlock as early as possible for writes to keep enqueuing. + inserts.Unlock() + + numTargetInsertsShard := len(targetInserts.shardInserts) + numTargetInsertsPending := len(targetInserts.batchInserts) + + // Now can unlock target inserts too. 
+ targetInserts.Unlock() + + if n := numTargetInsertsShard; n > 0 { + inserts.metrics.rotateInsertsShard.Inc(int64(n)) + } + if n := numTargetInsertsPending; n > 0 { + inserts.metrics.rotateInsertsPending.Inc(int64(n)) } } diff --git a/src/dbnode/storage/index_queue_forward_write_test.go b/src/dbnode/storage/index_queue_forward_write_test.go index 544bfa3b8f..ec406fbafa 100644 --- a/src/dbnode/storage/index_queue_forward_write_test.go +++ b/src/dbnode/storage/index_queue_forward_write_test.go @@ -404,7 +404,7 @@ func writeToShard( tag := ident.Tag{Name: ident.StringID(id), Value: ident.StringID("")} idTags := ident.NewTags(tag) iter := ident.NewTagsIterator(idTags) - _, wasWritten, err := shard.WriteTagged(ctx, ident.StringID(id), iter, now, + seriesWrite, err := shard.WriteTagged(ctx, ident.StringID(id), iter, now, 1.0, xtime.Second, nil, series.WriteOptions{ TruncateType: series.TypeBlock, TransformOptions: series.WriteTransformOptions{ @@ -413,7 +413,7 @@ func writeToShard( }, }) require.NoError(t, err) - require.Equal(t, shouldWrite, wasWritten) + require.Equal(t, shouldWrite, seriesWrite.WasWritten) } func verifyShard( diff --git a/src/dbnode/storage/namespace_test.go b/src/dbnode/storage/namespace_test.go index c4cd3af62c..83325af8ee 100644 --- a/src/dbnode/storage/namespace_test.go +++ b/src/dbnode/storage/namespace_test.go @@ -192,11 +192,11 @@ func TestNamespaceWriteShardNotOwned(t *testing.T) { ns.shards[i] = nil } now := time.Now() - _, wasWritten, err := ns.Write(ctx, ident.StringID("foo"), now, 0.0, xtime.Second, nil) + seriesWrite, err := ns.Write(ctx, ident.StringID("foo"), now, 0.0, xtime.Second, nil) require.Error(t, err) require.True(t, xerrors.IsRetryableError(err)) require.Equal(t, "not responsible for shard 0", err.Error()) - require.False(t, wasWritten) + require.False(t, seriesWrite.WasWritten) } func TestNamespaceWriteShardOwned(t *testing.T) { @@ -227,13 +227,13 @@ func TestNamespaceWriteShardOwned(t *testing.T) { ns.shards[testShardIDs[0].ID()] = shard - _, wasWritten, err := ns.Write(ctx, id, now, val, unit, ant) + seriesWrite, err := ns.Write(ctx, id, now, val, unit, ant) require.NoError(t, err) - require.True(t, wasWritten) + require.True(t, seriesWrite.WasWritten) - _, wasWritten, err = ns.Write(ctx, id, now, val, unit, ant) + seriesWrite, err = ns.Write(ctx, id, now, val, unit, ant) require.NoError(t, err) - require.False(t, wasWritten) + require.False(t, seriesWrite.WasWritten) } } @@ -1116,15 +1116,15 @@ func TestNamespaceIndexInsert(t *testing.T) { ns.shards[testShardIDs[0].ID()] = shard - _, wasWritten, err := ns.WriteTagged(ctx, ident.StringID("a"), + seriesWrite, err := ns.WriteTagged(ctx, ident.StringID("a"), ident.EmptyTagIterator, now, 1.0, xtime.Second, nil) require.NoError(t, err) - require.True(t, wasWritten) + require.True(t, seriesWrite.WasWritten) - _, wasWritten, err = ns.WriteTagged(ctx, ident.StringID("a"), + seriesWrite, err = ns.WriteTagged(ctx, ident.StringID("a"), ident.EmptyTagIterator, now, 1.0, xtime.Second, nil) require.NoError(t, err) - require.False(t, wasWritten) + require.False(t, seriesWrite.WasWritten) shard.EXPECT().Close() idx.EXPECT().Close().Return(nil) diff --git a/src/dbnode/storage/series/buffer_mock.go b/src/dbnode/storage/series/buffer_mock.go index 55afef6d27..09a5c4f4c3 100644 --- a/src/dbnode/storage/series/buffer_mock.go +++ b/src/dbnode/storage/series/buffer_mock.go @@ -1,20 +1,42 @@ // Code generated by MockGen. DO NOT EDIT. 
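The nsIndexInsertBatch.Rotate rewrite above is one of the core allocation savings in this commit: rather than allocating fresh per-CPU-core slices on every block rotation, the current batch swaps its pending insert slices with the already-processed slices held by a reusable target batch. A stripped-down sketch of that swap, with the locking, wait groups and metrics omitted and with placeholder element types standing in for the real entry types, is:

package example

// insertBatch is a simplified stand-in for the per-CPU-core insert state
// that nsIndexInsertBatch.Rotate recycles.
type insertBatch struct {
	shardInserts []interface{} // placeholder for []index.WriteBatchEntry
	batchInserts []interface{} // placeholder for []writes.PendingIndexInsert
}

// rotate hands the current pending inserts to target for processing and
// takes back target's old, already-processed slices truncated to zero
// length, so steady-state rotation reuses the same backing arrays.
func (b *insertBatch) rotate(target *insertBatch) {
	if target == nil {
		// Nothing to swap with: reset in place.
		b.shardInserts = b.shardInserts[:0]
		b.batchInserts = b.batchInserts[:0]
		return
	}

	// Nil out target's previous contents so stale references can be
	// garbage collected, then keep the zero-length slices for reuse.
	for i := range target.shardInserts {
		target.shardInserts[i] = nil
	}
	prevShard := target.shardInserts[:0]
	for i := range target.batchInserts {
		target.batchInserts[i] = nil
	}
	prevBatch := target.batchInserts[:0]

	// Swap: target now owns the pending inserts, b reuses target's arrays.
	target.shardInserts, b.shardInserts = b.shardInserts, prevShard
	target.batchInserts, b.batchInserts = b.batchInserts, prevBatch
}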
-// Source: /Users/r/go/src/github.com/m3db/m3/src/dbnode/storage/series/buffer.go +// Source: github.com/m3db/m3/src/dbnode/storage/series/buffer.go + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. // Package series is a generated GoMock package. package series import ( - gomock "github.com/golang/mock/gomock" - namespace "github.com/m3db/m3/src/dbnode/namespace" - persist "github.com/m3db/m3/src/dbnode/persist" - block "github.com/m3db/m3/src/dbnode/storage/block" - xio "github.com/m3db/m3/src/dbnode/x/xio" - context "github.com/m3db/m3/src/x/context" - ident "github.com/m3db/m3/src/x/ident" - time "github.com/m3db/m3/src/x/time" - reflect "reflect" - time0 "time" + "reflect" + "time" + + "github.com/m3db/m3/src/dbnode/namespace" + "github.com/m3db/m3/src/dbnode/persist" + "github.com/m3db/m3/src/dbnode/storage/block" + "github.com/m3db/m3/src/dbnode/x/xio" + "github.com/m3db/m3/src/x/context" + "github.com/m3db/m3/src/x/ident" + time0 "github.com/m3db/m3/src/x/time" + + "github.com/golang/mock/gomock" ) // MockdatabaseBuffer is a mock of databaseBuffer interface @@ -41,7 +63,7 @@ func (m *MockdatabaseBuffer) EXPECT() *MockdatabaseBufferMockRecorder { } // Write mocks base method -func (m *MockdatabaseBuffer) Write(ctx context.Context, id ident.ID, timestamp time0.Time, value float64, unit time.Unit, annotation []byte, wOpts WriteOptions) (bool, WriteType, error) { +func (m *MockdatabaseBuffer) Write(ctx context.Context, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte, wOpts WriteOptions) (bool, WriteType, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Write", ctx, id, timestamp, value, unit, annotation, wOpts) ret0, _ := ret[0].(bool) @@ -57,7 +79,7 @@ func (mr *MockdatabaseBufferMockRecorder) Write(ctx, id, timestamp, value, unit, } // Snapshot mocks base method -func (m *MockdatabaseBuffer) Snapshot(ctx context.Context, blockStart time0.Time, metadata persist.Metadata, persistFn persist.DataFn, nsCtx namespace.Context) error { +func (m *MockdatabaseBuffer) Snapshot(ctx context.Context, blockStart time.Time, metadata persist.Metadata, persistFn persist.DataFn, nsCtx namespace.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Snapshot", ctx, blockStart, metadata, persistFn, nsCtx) ret0, _ := ret[0].(error) @@ -71,7 +93,7 @@ func (mr *MockdatabaseBufferMockRecorder) Snapshot(ctx, blockStart, metadata, pe } // WarmFlush mocks base method -func (m *MockdatabaseBuffer) 
WarmFlush(ctx context.Context, blockStart time0.Time, metadata persist.Metadata, persistFn persist.DataFn, nsCtx namespace.Context) (FlushOutcome, error) { +func (m *MockdatabaseBuffer) WarmFlush(ctx context.Context, blockStart time.Time, metadata persist.Metadata, persistFn persist.DataFn, nsCtx namespace.Context) (FlushOutcome, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WarmFlush", ctx, blockStart, metadata, persistFn, nsCtx) ret0, _ := ret[0].(FlushOutcome) @@ -86,7 +108,7 @@ func (mr *MockdatabaseBufferMockRecorder) WarmFlush(ctx, blockStart, metadata, p } // ReadEncoded mocks base method -func (m *MockdatabaseBuffer) ReadEncoded(ctx context.Context, start, end time0.Time, nsCtx namespace.Context) ([][]xio.BlockReader, error) { +func (m *MockdatabaseBuffer) ReadEncoded(ctx context.Context, start, end time.Time, nsCtx namespace.Context) ([][]xio.BlockReader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadEncoded", ctx, start, end, nsCtx) ret0, _ := ret[0].([][]xio.BlockReader) @@ -101,7 +123,7 @@ func (mr *MockdatabaseBufferMockRecorder) ReadEncoded(ctx, start, end, nsCtx int } // FetchBlocksForColdFlush mocks base method -func (m *MockdatabaseBuffer) FetchBlocksForColdFlush(ctx context.Context, start time0.Time, version int, nsCtx namespace.Context) (block.FetchBlockResult, error) { +func (m *MockdatabaseBuffer) FetchBlocksForColdFlush(ctx context.Context, start time.Time, version int, nsCtx namespace.Context) (block.FetchBlockResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocksForColdFlush", ctx, start, version, nsCtx) ret0, _ := ret[0].(block.FetchBlockResult) @@ -116,7 +138,7 @@ func (mr *MockdatabaseBufferMockRecorder) FetchBlocksForColdFlush(ctx, start, ve } // FetchBlocks mocks base method -func (m *MockdatabaseBuffer) FetchBlocks(ctx context.Context, starts []time0.Time, nsCtx namespace.Context) []block.FetchBlockResult { +func (m *MockdatabaseBuffer) FetchBlocks(ctx context.Context, starts []time.Time, nsCtx namespace.Context) []block.FetchBlockResult { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocks", ctx, starts, nsCtx) ret0, _ := ret[0].([]block.FetchBlockResult) @@ -130,7 +152,7 @@ func (mr *MockdatabaseBufferMockRecorder) FetchBlocks(ctx, starts, nsCtx interfa } // FetchBlocksMetadata mocks base method -func (m *MockdatabaseBuffer) FetchBlocksMetadata(ctx context.Context, start, end time0.Time, opts FetchBlocksMetadataOptions) (block.FetchBlockMetadataResults, error) { +func (m *MockdatabaseBuffer) FetchBlocksMetadata(ctx context.Context, start, end time.Time, opts FetchBlocksMetadataOptions) (block.FetchBlockMetadataResults, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocksMetadata", ctx, start, end, opts) ret0, _ := ret[0].(block.FetchBlockMetadataResults) @@ -159,7 +181,7 @@ func (mr *MockdatabaseBufferMockRecorder) IsEmpty() *gomock.Call { } // ColdFlushBlockStarts mocks base method -func (m *MockdatabaseBuffer) ColdFlushBlockStarts(blockStates map[time.UnixNano]BlockState) OptimizedTimes { +func (m *MockdatabaseBuffer) ColdFlushBlockStarts(blockStates map[time0.UnixNano]BlockState) OptimizedTimes { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ColdFlushBlockStarts", blockStates) ret0, _ := ret[0].(OptimizedTimes) diff --git a/src/dbnode/storage/series/lookup/lookup_mock.go b/src/dbnode/storage/series/lookup/lookup_mock.go index 9f8ac4e03c..ebcdf5f5a2 100644 --- a/src/dbnode/storage/series/lookup/lookup_mock.go +++ b/src/dbnode/storage/series/lookup/lookup_mock.go @@ -1,6 +1,26 @@ // Code generated by MockGen. DO NOT EDIT. 
// Source: github.com/m3db/m3/src/dbnode/storage/series/lookup (interfaces: OnReleaseReadWriteRef) +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + // Package lookup is a generated GoMock package. package lookup diff --git a/src/dbnode/storage/series/series_mock.go b/src/dbnode/storage/series/series_mock.go index 23a1bba2a8..ca2240c093 100644 --- a/src/dbnode/storage/series/series_mock.go +++ b/src/dbnode/storage/series/series_mock.go @@ -1,6 +1,26 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/m3db/m3/src/dbnode/storage/series (interfaces: DatabaseSeries,QueryableBlockRetriever) +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + // Package series is a generated GoMock package. 
package series diff --git a/src/dbnode/storage/series/series_test.go b/src/dbnode/storage/series/series_test.go index 1050fe79bb..74e3fceac4 100644 --- a/src/dbnode/storage/series/series_test.go +++ b/src/dbnode/storage/series/series_test.go @@ -27,6 +27,7 @@ import ( "testing" "time" + "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/storage/index/convert" "github.com/m3db/m3/src/dbnode/clock" @@ -516,7 +517,6 @@ func TestSeriesFlush(t *testing.T) { AnyTimes() series := NewDatabaseSeries(DatabaseSeriesOptions{ - ID: ident.StringID("foo"), BlockRetriever: blockRetriever, Options: opts, }).(*dbSeries) @@ -525,12 +525,12 @@ func TestSeriesFlush(t *testing.T) { assert.NoError(t, err) ctx := context.NewContext() - series.buffer.Write(ctx, curr, 1234, xtime.Second, nil, WriteOptions{}) + series.buffer.Write(ctx, testID, curr, 1234, xtime.Second, nil, WriteOptions{}) ctx.BlockingClose() inputs := []error{errors.New("some error"), nil} for _, input := range inputs { - persistFn := func(_ ident.ID, _ ident.Tags, _ ts.Segment, _ uint32) error { + persistFn := func(_ persist.Metadata, _ ts.Segment, _ uint32) error { return input } ctx := context.NewContext() diff --git a/src/dbnode/storage/shard.go b/src/dbnode/storage/shard.go index 4a807e62b1..c680de0020 100644 --- a/src/dbnode/storage/shard.go +++ b/src/dbnode/storage/shard.go @@ -1167,11 +1167,9 @@ func (s *dbShard) newShardEntry( // Hence this stays on the storage/series.DatabaseSeries for when it needs // to be re-indexed. var ( - seriesID ident.BytesID seriesMetadata doc.Document err error ) - switch tagsArgOpts.arg { case tagsIterArg: // NB(r): Rewind so we record the tag iterator from the beginning. @@ -1181,7 +1179,7 @@ func (s *dbShard) newShardEntry( // Pass nil for the identifier pool because the pool will force us to use an array // with a large capacity to store the tags. Since these tags are long-lived, it's // better to allocate an array of the exact size to save memory. - seriesMetadata, err = convert.FromSeriesIDAndTagIter(seriesID, tagsIter) + seriesMetadata, err = convert.FromSeriesIDAndTagIter(id, tagsIter) tagsIter.Close() if err != nil { return nil, err @@ -1198,7 +1196,7 @@ func (s *dbShard) newShardEntry( } // Use the same bytes as the series metadata for the ID. 
- seriesID = ident.BytesID(seriesMetadata.ID) + seriesID := ident.BytesID(seriesMetadata.ID) uniqueIndex := s.increasingIndex.nextIndex() newSeries := s.seriesPool.Get() diff --git a/src/dbnode/storage/shard_index_test.go b/src/dbnode/storage/shard_index_test.go index 094f1a3c5a..df28627b38 100644 --- a/src/dbnode/storage/shard_index_test.go +++ b/src/dbnode/storage/shard_index_test.go @@ -79,22 +79,22 @@ func TestShardInsertNamespaceIndex(t *testing.T) { ctx := context.NewContext() defer ctx.Close() - _, wasWritten, err := shard.WriteTagged(ctx, ident.StringID("foo"), + seriesWrite, err := shard.WriteTagged(ctx, ident.StringID("foo"), ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))), now, 1.0, xtime.Second, nil, series.WriteOptions{}) require.NoError(t, err) - require.True(t, wasWritten) + require.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("foo"), + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("foo"), ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))), now, 2.0, xtime.Second, nil, series.WriteOptions{}) require.NoError(t, err) - require.True(t, wasWritten) + require.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.Write( + seriesWrite, err = shard.Write( ctx, ident.StringID("baz"), now, 1.0, xtime.Second, nil, series.WriteOptions{}) require.NoError(t, err) - require.True(t, wasWritten) + require.True(t, seriesWrite.WasWritten) lock.Lock() defer lock.Unlock() @@ -129,25 +129,25 @@ func TestShardAsyncInsertNamespaceIndex(t *testing.T) { ctx := context.NewContext() defer ctx.Close() now := time.Now() - _, wasWritten, err := shard.WriteTagged(ctx, ident.StringID("foo"), + seriesWrite, err := shard.WriteTagged(ctx, ident.StringID("foo"), ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))), now, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.Write(ctx, ident.StringID("bar"), now, + seriesWrite, err = shard.Write(ctx, ident.StringID("bar"), now, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("baz"), + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.NewTagsIterator(ident.NewTags( ident.StringTag("all", "tags"), ident.StringTag("should", "be-present"), )), now, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) for { lock.RLock() @@ -216,11 +216,11 @@ func TestShardAsyncIndexOnlyWhenNotIndexed(t *testing.T) { ctx := context.NewContext() defer ctx.Close() - _, wasWritten, err := shard.WriteTagged(ctx, ident.StringID("foo"), + seriesWrite, err := shard.WriteTagged(ctx, ident.StringID("foo"), ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))), now, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) for { if l := atomic.LoadInt32(&numCalls); l == 1 { @@ -230,18 +230,18 @@ func TestShardAsyncIndexOnlyWhenNotIndexed(t *testing.T) { } // ensure we don't index once we have already indexed - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("foo"), + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("foo"), ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", 
"value"))), now.Add(time.Second), 2.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) // ensure attempting to write same point yields false and does not write - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("foo"), + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("foo"), ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))), now.Add(time.Second), 2.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.False(t, wasWritten) + assert.False(t, seriesWrite.WasWritten) l := atomic.LoadInt32(&numCalls) assert.Equal(t, int32(1), l) @@ -288,11 +288,11 @@ func TestShardAsyncIndexIfExpired(t *testing.T) { ctx := context.NewContext() defer ctx.Close() - _, wasWritten, err := shard.WriteTagged(ctx, ident.StringID("foo"), + seriesWrite, err := shard.WriteTagged(ctx, ident.StringID("foo"), ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))), now, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) // wait till we're done indexing. indexed := xclock.WaitUntil(func() bool { @@ -302,11 +302,11 @@ func TestShardAsyncIndexIfExpired(t *testing.T) { // ensure we index because it's expired nextWriteTime := now.Add(blockSize) - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("foo"), + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("foo"), ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))), nextWriteTime, 2.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) // wait till we're done indexing. reIndexed := xclock.WaitUntil(func() bool { diff --git a/src/dbnode/storage/shard_race_prop_test.go b/src/dbnode/storage/shard_race_prop_test.go index b6d33f2dee..294f17aa16 100644 --- a/src/dbnode/storage/shard_race_prop_test.go +++ b/src/dbnode/storage/shard_race_prop_test.go @@ -187,9 +187,9 @@ func testShardTickWriteRace(t *testing.T, tickBatchSize, numSeries int) { <-barrier ctx := context.NewContext() now := time.Now() - _, wasWritten, err := shard.Write(ctx, id, now, 1.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err := shard.Write(ctx, id, now, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) ctx.BlockingClose() }() } @@ -284,9 +284,9 @@ func TestShardTickBootstrapWriteRace(t *testing.T) { <-barrier ctx := context.NewContext() now := time.Now() - _, wasWritten, err := shard.Write(ctx, id, now, 1.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err := shard.Write(ctx, id, now, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) ctx.BlockingClose() }() } diff --git a/src/dbnode/storage/shard_ref_count_test.go b/src/dbnode/storage/shard_ref_count_test.go index f6afa61c05..ad761745f0 100644 --- a/src/dbnode/storage/shard_ref_count_test.go +++ b/src/dbnode/storage/shard_ref_count_test.go @@ -66,21 +66,21 @@ func testShardWriteSyncRefCount(t *testing.T, opts Options) { ctx := context.NewContext() defer ctx.Close() - _, wasWritten, err := shard.Write(ctx, ident.StringID("foo"), now, 1.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err := shard.Write(ctx, ident.StringID("foo"), now, 1.0, xtime.Second, nil, series.WriteOptions{}) 
assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.Write(ctx, ident.StringID("foo"), now, 1.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.Write(ctx, ident.StringID("foo"), now, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.False(t, wasWritten) + assert.False(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.Write(ctx, ident.StringID("bar"), now, 2.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.Write(ctx, ident.StringID("bar"), now, 2.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.Write(ctx, ident.StringID("baz"), now, 3.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.Write(ctx, ident.StringID("baz"), now, 3.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) // ensure all entries have no references left for _, id := range []string{"foo", "bar", "baz"} { @@ -94,17 +94,17 @@ func testShardWriteSyncRefCount(t *testing.T, opts Options) { // write already inserted series' next := now.Add(time.Minute) - _, wasWritten, err = shard.Write(ctx, ident.StringID("foo"), next, 1.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.Write(ctx, ident.StringID("foo"), next, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.Write(ctx, ident.StringID("bar"), next, 2.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.Write(ctx, ident.StringID("bar"), next, 2.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.Write(ctx, ident.StringID("baz"), next, 3.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.Write(ctx, ident.StringID("baz"), next, 3.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) // ensure all entries have no references left for _, id := range []string{"foo", "bar", "baz"} { @@ -188,17 +188,17 @@ func testShardWriteTaggedSyncRefCount(t *testing.T, idx NamespaceIndex) { ctx := context.NewContext() defer ctx.Close() - _, wasWritten, err := shard.WriteTagged(ctx, ident.StringID("foo"), ident.EmptyTagIterator, now, 1.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err := shard.WriteTagged(ctx, ident.StringID("foo"), ident.EmptyTagIterator, now, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("bar"), ident.EmptyTagIterator, now, 2.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("bar"), ident.EmptyTagIterator, now, 2.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.EmptyTagIterator, now, 3.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.EmptyTagIterator, now, 3.0, xtime.Second, nil, 
series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) // ensure all entries have no references left for _, id := range []string{"foo", "bar", "baz"} { @@ -212,17 +212,17 @@ func testShardWriteTaggedSyncRefCount(t *testing.T, idx NamespaceIndex) { // write already inserted series' next := now.Add(time.Minute) - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("foo"), ident.EmptyTagIterator, next, 1.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("foo"), ident.EmptyTagIterator, next, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("bar"), ident.EmptyTagIterator, next, 2.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("bar"), ident.EmptyTagIterator, next, 2.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.EmptyTagIterator, next, 3.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.EmptyTagIterator, next, 3.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) // ensure all entries have no references left for _, id := range []string{"foo", "bar", "baz"} { @@ -256,17 +256,17 @@ func TestShardWriteAsyncRefCount(t *testing.T) { ctx := context.NewContext() defer ctx.Close() - _, wasWritten, err := shard.Write(ctx, ident.StringID("foo"), now, 1.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err := shard.Write(ctx, ident.StringID("foo"), now, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.Write(ctx, ident.StringID("bar"), now, 2.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.Write(ctx, ident.StringID("bar"), now, 2.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.Write(ctx, ident.StringID("baz"), now, 3.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.Write(ctx, ident.StringID("baz"), now, 3.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) inserted := xclock.WaitUntil(func() bool { counter, ok := testReporter.Counters()["dbshard.insert-queue.inserts"] @@ -286,17 +286,17 @@ func TestShardWriteAsyncRefCount(t *testing.T) { // write already inserted series' next := now.Add(time.Minute) - _, wasWritten, err = shard.Write(ctx, ident.StringID("foo"), next, 1.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.Write(ctx, ident.StringID("foo"), next, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.Write(ctx, ident.StringID("bar"), next, 2.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.Write(ctx, ident.StringID("bar"), next, 2.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + 
assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.Write(ctx, ident.StringID("baz"), next, 3.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.Write(ctx, ident.StringID("baz"), next, 3.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) // ensure all entries have no references left for _, id := range []string{"foo", "bar", "baz"} { @@ -408,20 +408,20 @@ func testShardWriteTaggedAsyncRefCount(t *testing.T, idx NamespaceIndex, nowFn f ctx := context.NewContext() defer ctx.Close() - _, wasWritten, err := shard.WriteTagged(ctx, ident.StringID("foo"), + seriesWrite, err := shard.WriteTagged(ctx, ident.StringID("foo"), ident.EmptyTagIterator, now, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("bar"), + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("bar"), ident.EmptyTagIterator, now, 2.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("baz"), + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.EmptyTagIterator, now, 3.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) inserted := xclock.WaitUntil(func() bool { counter, ok := testReporter.Counters()["dbshard.insert-queue.inserts"] @@ -441,17 +441,17 @@ func testShardWriteTaggedAsyncRefCount(t *testing.T, idx NamespaceIndex, nowFn f // write already inserted series' next := now.Add(time.Minute) - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("foo"), ident.EmptyTagIterator, next, 1.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("foo"), ident.EmptyTagIterator, next, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("bar"), ident.EmptyTagIterator, next, 2.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("bar"), ident.EmptyTagIterator, next, 2.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) - _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.EmptyTagIterator, next, 3.0, xtime.Second, nil, series.WriteOptions{}) + seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.EmptyTagIterator, next, 3.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - assert.True(t, wasWritten) + assert.True(t, seriesWrite.WasWritten) // ensure all entries have no references left for _, id := range []string{"foo", "bar", "baz"} { diff --git a/src/dbnode/storage/shard_test.go b/src/dbnode/storage/shard_test.go index 02d0c4ef73..9248eafa63 100644 --- a/src/dbnode/storage/shard_test.go +++ b/src/dbnode/storage/shard_test.go @@ -878,13 +878,13 @@ func writeShardAndVerify( expectedShouldWrite bool, expectedIdx uint64, ) { - series, wasWritten, err := shard.Write(ctx, ident.StringID(id), + seriesWrite, err := shard.Write(ctx, ident.StringID(id), now, value, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) - 
assert.Equal(t, expectedShouldWrite, wasWritten) - assert.Equal(t, id, series.ID.String()) - assert.Equal(t, "testns1", series.Namespace.String()) - assert.Equal(t, expectedIdx, series.UniqueIndex) + assert.Equal(t, expectedShouldWrite, seriesWrite.WasWritten) + assert.Equal(t, id, seriesWrite.Series.ID.String()) + assert.Equal(t, "testns1", seriesWrite.Series.Namespace.String()) + assert.Equal(t, expectedIdx, seriesWrite.Series.UniqueIndex) } func TestShardTick(t *testing.T) { diff --git a/src/dbnode/storage/storage_mock.go b/src/dbnode/storage/storage_mock.go index e40aea187e..308788f47f 100644 --- a/src/dbnode/storage/storage_mock.go +++ b/src/dbnode/storage/storage_mock.go @@ -1,40 +1,60 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: /Users/r/go/src/github.com/m3db/m3/src/dbnode/storage/types.go +// Source: github.com/m3db/m3/src/dbnode/storage/types.go + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. // Package storage is a generated GoMock package. 
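writeShardAndVerify above, like the namespace and shard tests before it, now reads the series identity and the WasWritten flag off a single result value instead of the old (ts.Series, bool, error) return triple. A minimal sketch of the shape these tests rely on, assuming only the two fields exercised here (the real struct in storage/types.go may carry additional fields, for example for pending index inserts), is:

package storage

import "github.com/m3db/m3/src/dbnode/ts"

// SeriesWrite is sketched from its use in the tests above: the resolved
// series plus whether the datapoint was actually written (false when the
// write was a duplicate and therefore skipped).
type SeriesWrite struct {
	Series     ts.Series
	WasWritten bool
}

Callers then assert on the result directly, for example require.True(t, seriesWrite.WasWritten) after a first write and require.False(t, seriesWrite.WasWritten) after writing the same datapoint again.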
package storage import ( - reflect "reflect" - sync0 "sync" - time0 "time" - - gomock "github.com/golang/mock/gomock" - clock "github.com/m3db/m3/src/dbnode/clock" - encoding "github.com/m3db/m3/src/dbnode/encoding" - namespace "github.com/m3db/m3/src/dbnode/namespace" - persist "github.com/m3db/m3/src/dbnode/persist" - fs "github.com/m3db/m3/src/dbnode/persist/fs" - commitlog "github.com/m3db/m3/src/dbnode/persist/fs/commitlog" - runtime "github.com/m3db/m3/src/dbnode/runtime" - sharding "github.com/m3db/m3/src/dbnode/sharding" - block "github.com/m3db/m3/src/dbnode/storage/block" - bootstrap "github.com/m3db/m3/src/dbnode/storage/bootstrap" - result "github.com/m3db/m3/src/dbnode/storage/bootstrap/result" - index "github.com/m3db/m3/src/dbnode/storage/index" - repair "github.com/m3db/m3/src/dbnode/storage/repair" - series "github.com/m3db/m3/src/dbnode/storage/series" - ts "github.com/m3db/m3/src/dbnode/ts" + "reflect" + "sync" + "time" + + "github.com/m3db/m3/src/dbnode/clock" + "github.com/m3db/m3/src/dbnode/encoding" + "github.com/m3db/m3/src/dbnode/namespace" + "github.com/m3db/m3/src/dbnode/persist" + "github.com/m3db/m3/src/dbnode/persist/fs" + "github.com/m3db/m3/src/dbnode/persist/fs/commitlog" + "github.com/m3db/m3/src/dbnode/runtime" + "github.com/m3db/m3/src/dbnode/sharding" + "github.com/m3db/m3/src/dbnode/storage/block" + "github.com/m3db/m3/src/dbnode/storage/bootstrap" + "github.com/m3db/m3/src/dbnode/storage/bootstrap/result" + "github.com/m3db/m3/src/dbnode/storage/index" + "github.com/m3db/m3/src/dbnode/storage/repair" + "github.com/m3db/m3/src/dbnode/storage/series" "github.com/m3db/m3/src/dbnode/ts/writes" - xio "github.com/m3db/m3/src/dbnode/x/xio" - xpool "github.com/m3db/m3/src/dbnode/x/xpool" - context "github.com/m3db/m3/src/x/context" - ident "github.com/m3db/m3/src/x/ident" - instrument "github.com/m3db/m3/src/x/instrument" - mmap "github.com/m3db/m3/src/x/mmap" - pool "github.com/m3db/m3/src/x/pool" - sync "github.com/m3db/m3/src/x/sync" - time "github.com/m3db/m3/src/x/time" + "github.com/m3db/m3/src/dbnode/x/xio" + "github.com/m3db/m3/src/dbnode/x/xpool" + "github.com/m3db/m3/src/x/context" + "github.com/m3db/m3/src/x/ident" + "github.com/m3db/m3/src/x/instrument" + "github.com/m3db/m3/src/x/mmap" + "github.com/m3db/m3/src/x/pool" + sync0 "github.com/m3db/m3/src/x/sync" + time0 "github.com/m3db/m3/src/x/time" + + "github.com/golang/mock/gomock" ) // MockIndexedErrorHandler is a mock of IndexedErrorHandler interface @@ -207,7 +227,7 @@ func (mr *MockDatabaseMockRecorder) Terminate() *gomock.Call { } // Write mocks base method -func (m *MockDatabase) Write(ctx context.Context, namespace, id ident.ID, timestamp time0.Time, value float64, unit time.Unit, annotation []byte) error { +func (m *MockDatabase) Write(ctx context.Context, namespace, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Write", ctx, namespace, id, timestamp, value, unit, annotation) ret0, _ := ret[0].(error) @@ -221,7 +241,7 @@ func (mr *MockDatabaseMockRecorder) Write(ctx, namespace, id, timestamp, value, } // WriteTagged mocks base method -func (m *MockDatabase) WriteTagged(ctx context.Context, namespace, id ident.ID, tags ident.TagIterator, timestamp time0.Time, value float64, unit time.Unit, annotation []byte) error { +func (m *MockDatabase) WriteTagged(ctx context.Context, namespace, id ident.ID, tags ident.TagIterator, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { 
m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteTagged", ctx, namespace, id, tags, timestamp, value, unit, annotation) ret0, _ := ret[0].(error) @@ -308,7 +328,7 @@ func (mr *MockDatabaseMockRecorder) AggregateQuery(ctx, namespace, query, opts i } // ReadEncoded mocks base method -func (m *MockDatabase) ReadEncoded(ctx context.Context, namespace, id ident.ID, start, end time0.Time) ([][]xio.BlockReader, error) { +func (m *MockDatabase) ReadEncoded(ctx context.Context, namespace, id ident.ID, start, end time.Time) ([][]xio.BlockReader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadEncoded", ctx, namespace, id, start, end) ret0, _ := ret[0].([][]xio.BlockReader) @@ -323,7 +343,7 @@ func (mr *MockDatabaseMockRecorder) ReadEncoded(ctx, namespace, id, start, end i } // FetchBlocks mocks base method -func (m *MockDatabase) FetchBlocks(ctx context.Context, namespace ident.ID, shard uint32, id ident.ID, starts []time0.Time) ([]block.FetchBlockResult, error) { +func (m *MockDatabase) FetchBlocks(ctx context.Context, namespace ident.ID, shard uint32, id ident.ID, starts []time.Time) ([]block.FetchBlockResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocks", ctx, namespace, shard, id, starts) ret0, _ := ret[0].([]block.FetchBlockResult) @@ -338,7 +358,7 @@ func (mr *MockDatabaseMockRecorder) FetchBlocks(ctx, namespace, shard, id, start } // FetchBlocksMetadataV2 mocks base method -func (m *MockDatabase) FetchBlocksMetadataV2(ctx context.Context, namespace ident.ID, shard uint32, start, end time0.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { +func (m *MockDatabase) FetchBlocksMetadataV2(ctx context.Context, namespace ident.ID, shard uint32, start, end time.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocksMetadataV2", ctx, namespace, shard, start, end, limit, pageToken, opts) ret0, _ := ret[0].(block.FetchBlocksMetadataResults) @@ -453,7 +473,7 @@ func (mr *MockDatabaseMockRecorder) BootstrapState() *gomock.Call { } // FlushState mocks base method -func (m *MockDatabase) FlushState(namespace ident.ID, shardID uint32, blockStart time0.Time) (fileOpState, error) { +func (m *MockDatabase) FlushState(namespace ident.ID, shardID uint32, blockStart time.Time) (fileOpState, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FlushState", namespace, shardID, blockStart) ret0, _ := ret[0].(fileOpState) @@ -602,7 +622,7 @@ func (mr *MockdatabaseMockRecorder) Terminate() *gomock.Call { } // Write mocks base method -func (m *Mockdatabase) Write(ctx context.Context, namespace, id ident.ID, timestamp time0.Time, value float64, unit time.Unit, annotation []byte) error { +func (m *Mockdatabase) Write(ctx context.Context, namespace, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Write", ctx, namespace, id, timestamp, value, unit, annotation) ret0, _ := ret[0].(error) @@ -616,7 +636,7 @@ func (mr *MockdatabaseMockRecorder) Write(ctx, namespace, id, timestamp, value, } // WriteTagged mocks base method -func (m *Mockdatabase) WriteTagged(ctx context.Context, namespace, id ident.ID, tags ident.TagIterator, timestamp time0.Time, value float64, unit time.Unit, annotation []byte) error { +func (m *Mockdatabase) WriteTagged(ctx context.Context, namespace, id ident.ID, tags ident.TagIterator, timestamp 
time.Time, value float64, unit time0.Unit, annotation []byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteTagged", ctx, namespace, id, tags, timestamp, value, unit, annotation) ret0, _ := ret[0].(error) @@ -703,7 +723,7 @@ func (mr *MockdatabaseMockRecorder) AggregateQuery(ctx, namespace, query, opts i } // ReadEncoded mocks base method -func (m *Mockdatabase) ReadEncoded(ctx context.Context, namespace, id ident.ID, start, end time0.Time) ([][]xio.BlockReader, error) { +func (m *Mockdatabase) ReadEncoded(ctx context.Context, namespace, id ident.ID, start, end time.Time) ([][]xio.BlockReader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadEncoded", ctx, namespace, id, start, end) ret0, _ := ret[0].([][]xio.BlockReader) @@ -718,7 +738,7 @@ func (mr *MockdatabaseMockRecorder) ReadEncoded(ctx, namespace, id, start, end i } // FetchBlocks mocks base method -func (m *Mockdatabase) FetchBlocks(ctx context.Context, namespace ident.ID, shard uint32, id ident.ID, starts []time0.Time) ([]block.FetchBlockResult, error) { +func (m *Mockdatabase) FetchBlocks(ctx context.Context, namespace ident.ID, shard uint32, id ident.ID, starts []time.Time) ([]block.FetchBlockResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocks", ctx, namespace, shard, id, starts) ret0, _ := ret[0].([]block.FetchBlockResult) @@ -733,7 +753,7 @@ func (mr *MockdatabaseMockRecorder) FetchBlocks(ctx, namespace, shard, id, start } // FetchBlocksMetadataV2 mocks base method -func (m *Mockdatabase) FetchBlocksMetadataV2(ctx context.Context, namespace ident.ID, shard uint32, start, end time0.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { +func (m *Mockdatabase) FetchBlocksMetadataV2(ctx context.Context, namespace ident.ID, shard uint32, start, end time.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocksMetadataV2", ctx, namespace, shard, start, end, limit, pageToken, opts) ret0, _ := ret[0].(block.FetchBlocksMetadataResults) @@ -848,7 +868,7 @@ func (mr *MockdatabaseMockRecorder) BootstrapState() *gomock.Call { } // FlushState mocks base method -func (m *Mockdatabase) FlushState(namespace ident.ID, shardID uint32, blockStart time0.Time) (fileOpState, error) { +func (m *Mockdatabase) FlushState(namespace ident.ID, shardID uint32, blockStart time.Time) (fileOpState, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FlushState", namespace, shardID, blockStart) ret0, _ := ret[0].(fileOpState) @@ -1204,7 +1224,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) OwnedShards() *gomock.Call { } // Tick mocks base method -func (m *MockdatabaseNamespace) Tick(c context.Cancellable, startTime time0.Time) error { +func (m *MockdatabaseNamespace) Tick(c context.Cancellable, startTime time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Tick", c, startTime) ret0, _ := ret[0].(error) @@ -1218,13 +1238,12 @@ func (mr *MockdatabaseNamespaceMockRecorder) Tick(c, startTime interface{}) *gom } // Write mocks base method -func (m *MockdatabaseNamespace) Write(ctx context.Context, id ident.ID, timestamp time0.Time, value float64, unit time.Unit, annotation []byte) (ts.Series, bool, error) { +func (m *MockdatabaseNamespace) Write(ctx context.Context, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) (SeriesWrite, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Write", 
ctx, id, timestamp, value, unit, annotation) - ret0, _ := ret[0].(ts.Series) - ret1, _ := ret[1].(bool) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 + ret0, _ := ret[0].(SeriesWrite) + ret1, _ := ret[1].(error) + return ret0, ret1 } // Write indicates an expected call of Write @@ -1234,13 +1253,12 @@ func (mr *MockdatabaseNamespaceMockRecorder) Write(ctx, id, timestamp, value, un } // WriteTagged mocks base method -func (m *MockdatabaseNamespace) WriteTagged(ctx context.Context, id ident.ID, tags ident.TagIterator, timestamp time0.Time, value float64, unit time.Unit, annotation []byte) (ts.Series, bool, error) { +func (m *MockdatabaseNamespace) WriteTagged(ctx context.Context, id ident.ID, tags ident.TagIterator, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) (SeriesWrite, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteTagged", ctx, id, tags, timestamp, value, unit, annotation) - ret0, _ := ret[0].(ts.Series) - ret1, _ := ret[1].(bool) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 + ret0, _ := ret[0].(SeriesWrite) + ret1, _ := ret[1].(error) + return ret0, ret1 } // WriteTagged indicates an expected call of WriteTagged @@ -1280,7 +1298,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) AggregateQuery(ctx, query, opts int } // ReadEncoded mocks base method -func (m *MockdatabaseNamespace) ReadEncoded(ctx context.Context, id ident.ID, start, end time0.Time) ([][]xio.BlockReader, error) { +func (m *MockdatabaseNamespace) ReadEncoded(ctx context.Context, id ident.ID, start, end time.Time) ([][]xio.BlockReader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadEncoded", ctx, id, start, end) ret0, _ := ret[0].([][]xio.BlockReader) @@ -1295,7 +1313,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) ReadEncoded(ctx, id, start, end int } // FetchBlocks mocks base method -func (m *MockdatabaseNamespace) FetchBlocks(ctx context.Context, shardID uint32, id ident.ID, starts []time0.Time) ([]block.FetchBlockResult, error) { +func (m *MockdatabaseNamespace) FetchBlocks(ctx context.Context, shardID uint32, id ident.ID, starts []time.Time) ([]block.FetchBlockResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocks", ctx, shardID, id, starts) ret0, _ := ret[0].([]block.FetchBlockResult) @@ -1310,7 +1328,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) FetchBlocks(ctx, shardID, id, start } // FetchBlocksMetadataV2 mocks base method -func (m *MockdatabaseNamespace) FetchBlocksMetadataV2(ctx context.Context, shardID uint32, start, end time0.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { +func (m *MockdatabaseNamespace) FetchBlocksMetadataV2(ctx context.Context, shardID uint32, start, end time.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocksMetadataV2", ctx, shardID, start, end, limit, pageToken, opts) ret0, _ := ret[0].(block.FetchBlocksMetadataResults) @@ -1355,7 +1373,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) Bootstrap(ctx, bootstrapResult inte } // WarmFlush mocks base method -func (m *MockdatabaseNamespace) WarmFlush(blockStart time0.Time, flush persist.FlushPreparer) error { +func (m *MockdatabaseNamespace) WarmFlush(blockStart time.Time, flush persist.FlushPreparer) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WarmFlush", blockStart, flush) ret0, _ := ret[0].(error) @@ -1397,7 +1415,7 @@ func (mr 
*MockdatabaseNamespaceMockRecorder) ColdFlush(flush interface{}) *gomoc } // Snapshot mocks base method -func (m *MockdatabaseNamespace) Snapshot(blockStart, snapshotTime time0.Time, flush persist.SnapshotPreparer) error { +func (m *MockdatabaseNamespace) Snapshot(blockStart, snapshotTime time.Time, flush persist.SnapshotPreparer) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Snapshot", blockStart, snapshotTime, flush) ret0, _ := ret[0].(error) @@ -1411,7 +1429,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) Snapshot(blockStart, snapshotTime, } // NeedsFlush mocks base method -func (m *MockdatabaseNamespace) NeedsFlush(alignedInclusiveStart, alignedInclusiveEnd time0.Time) (bool, error) { +func (m *MockdatabaseNamespace) NeedsFlush(alignedInclusiveStart, alignedInclusiveEnd time.Time) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NeedsFlush", alignedInclusiveStart, alignedInclusiveEnd) ret0, _ := ret[0].(bool) @@ -1441,7 +1459,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) Truncate() *gomock.Call { } // Repair mocks base method -func (m *MockdatabaseNamespace) Repair(repairer databaseShardRepairer, tr time.Range) error { +func (m *MockdatabaseNamespace) Repair(repairer databaseShardRepairer, tr time0.Range) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Repair", repairer, tr) ret0, _ := ret[0].(error) @@ -1469,7 +1487,7 @@ func (mr *MockdatabaseNamespaceMockRecorder) BootstrapState() *gomock.Call { } // FlushState mocks base method -func (m *MockdatabaseNamespace) FlushState(shardID uint32, blockStart time0.Time) (fileOpState, error) { +func (m *MockdatabaseNamespace) FlushState(shardID uint32, blockStart time.Time) (fileOpState, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FlushState", shardID, blockStart) ret0, _ := ret[0].(fileOpState) @@ -1499,6 +1517,20 @@ func (mr *MockdatabaseNamespaceMockRecorder) SeriesReadWriteRef(shardID, id, tag return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SeriesReadWriteRef", reflect.TypeOf((*MockdatabaseNamespace)(nil).SeriesReadWriteRef), shardID, id, tags) } +// WritePendingIndexInserts mocks base method +func (m *MockdatabaseNamespace) WritePendingIndexInserts(pending []writes.PendingIndexInsert) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WritePendingIndexInserts", pending) + ret0, _ := ret[0].(error) + return ret0 +} + +// WritePendingIndexInserts indicates an expected call of WritePendingIndexInserts +func (mr *MockdatabaseNamespaceMockRecorder) WritePendingIndexInserts(pending interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WritePendingIndexInserts", reflect.TypeOf((*MockdatabaseNamespace)(nil).WritePendingIndexInserts), pending) +} + // MockShard is a mock of Shard interface type MockShard struct { ctrl *gomock.Controller @@ -1658,7 +1690,7 @@ func (mr *MockdatabaseShardMockRecorder) BootstrapState() *gomock.Call { } // OnEvictedFromWiredList mocks base method -func (m *MockdatabaseShard) OnEvictedFromWiredList(id ident.ID, blockStart time0.Time) { +func (m *MockdatabaseShard) OnEvictedFromWiredList(id ident.ID, blockStart time.Time) { m.ctrl.T.Helper() m.ctrl.Call(m, "OnEvictedFromWiredList", id, blockStart) } @@ -1684,7 +1716,7 @@ func (mr *MockdatabaseShardMockRecorder) Close() *gomock.Call { } // Tick mocks base method -func (m *MockdatabaseShard) Tick(c context.Cancellable, startTime time0.Time, nsCtx namespace.Context) (tickResult, error) { +func (m *MockdatabaseShard) Tick(c context.Cancellable, startTime time.Time, nsCtx 
namespace.Context) (tickResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Tick", c, startTime, nsCtx) ret0, _ := ret[0].(tickResult) @@ -1699,13 +1731,12 @@ func (mr *MockdatabaseShardMockRecorder) Tick(c, startTime, nsCtx interface{}) * } // Write mocks base method -func (m *MockdatabaseShard) Write(ctx context.Context, id ident.ID, timestamp time0.Time, value float64, unit time.Unit, annotation []byte, wOpts series.WriteOptions) (ts.Series, bool, error) { +func (m *MockdatabaseShard) Write(ctx context.Context, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte, wOpts series.WriteOptions) (SeriesWrite, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Write", ctx, id, timestamp, value, unit, annotation, wOpts) - ret0, _ := ret[0].(ts.Series) - ret1, _ := ret[1].(bool) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 + ret0, _ := ret[0].(SeriesWrite) + ret1, _ := ret[1].(error) + return ret0, ret1 } // Write indicates an expected call of Write @@ -1715,13 +1746,12 @@ func (mr *MockdatabaseShardMockRecorder) Write(ctx, id, timestamp, value, unit, } // WriteTagged mocks base method -func (m *MockdatabaseShard) WriteTagged(ctx context.Context, id ident.ID, tags ident.TagIterator, timestamp time0.Time, value float64, unit time.Unit, annotation []byte, wOpts series.WriteOptions) (ts.Series, bool, error) { +func (m *MockdatabaseShard) WriteTagged(ctx context.Context, id ident.ID, tags ident.TagIterator, timestamp time.Time, value float64, unit time0.Unit, annotation []byte, wOpts series.WriteOptions) (SeriesWrite, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteTagged", ctx, id, tags, timestamp, value, unit, annotation, wOpts) - ret0, _ := ret[0].(ts.Series) - ret1, _ := ret[1].(bool) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 + ret0, _ := ret[0].(SeriesWrite) + ret1, _ := ret[1].(error) + return ret0, ret1 } // WriteTagged indicates an expected call of WriteTagged @@ -1731,7 +1761,7 @@ func (mr *MockdatabaseShardMockRecorder) WriteTagged(ctx, id, tags, timestamp, v } // ReadEncoded mocks base method -func (m *MockdatabaseShard) ReadEncoded(ctx context.Context, id ident.ID, start, end time0.Time, nsCtx namespace.Context) ([][]xio.BlockReader, error) { +func (m *MockdatabaseShard) ReadEncoded(ctx context.Context, id ident.ID, start, end time.Time, nsCtx namespace.Context) ([][]xio.BlockReader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadEncoded", ctx, id, start, end, nsCtx) ret0, _ := ret[0].([][]xio.BlockReader) @@ -1746,7 +1776,7 @@ func (mr *MockdatabaseShardMockRecorder) ReadEncoded(ctx, id, start, end, nsCtx } // FetchBlocks mocks base method -func (m *MockdatabaseShard) FetchBlocks(ctx context.Context, id ident.ID, starts []time0.Time, nsCtx namespace.Context) ([]block.FetchBlockResult, error) { +func (m *MockdatabaseShard) FetchBlocks(ctx context.Context, id ident.ID, starts []time.Time, nsCtx namespace.Context) ([]block.FetchBlockResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocks", ctx, id, starts, nsCtx) ret0, _ := ret[0].([]block.FetchBlockResult) @@ -1761,7 +1791,7 @@ func (mr *MockdatabaseShardMockRecorder) FetchBlocks(ctx, id, starts, nsCtx inte } // FetchBlocksForColdFlush mocks base method -func (m *MockdatabaseShard) FetchBlocksForColdFlush(ctx context.Context, seriesID ident.ID, start time0.Time, version int, nsCtx namespace.Context) (block.FetchBlockResult, error) { +func (m *MockdatabaseShard) FetchBlocksForColdFlush(ctx context.Context, seriesID ident.ID, start time.Time, version int, 
nsCtx namespace.Context) (block.FetchBlockResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocksForColdFlush", ctx, seriesID, start, version, nsCtx) ret0, _ := ret[0].(block.FetchBlockResult) @@ -1776,7 +1806,7 @@ func (mr *MockdatabaseShardMockRecorder) FetchBlocksForColdFlush(ctx, seriesID, } // FetchBlocksMetadataV2 mocks base method -func (m *MockdatabaseShard) FetchBlocksMetadataV2(ctx context.Context, start, end time0.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { +func (m *MockdatabaseShard) FetchBlocksMetadataV2(ctx context.Context, start, end time.Time, limit int64, pageToken PageToken, opts block.FetchBlocksMetadataOptions) (block.FetchBlocksMetadataResults, PageToken, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchBlocksMetadataV2", ctx, start, end, limit, pageToken, opts) ret0, _ := ret[0].(block.FetchBlocksMetadataResults) @@ -1846,7 +1876,7 @@ func (mr *MockdatabaseShardMockRecorder) LoadBlocks(series interface{}) *gomock. } // WarmFlush mocks base method -func (m *MockdatabaseShard) WarmFlush(blockStart time0.Time, flush persist.FlushPreparer, nsCtx namespace.Context) error { +func (m *MockdatabaseShard) WarmFlush(blockStart time.Time, flush persist.FlushPreparer, nsCtx namespace.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WarmFlush", blockStart, flush, nsCtx) ret0, _ := ret[0].(error) @@ -1875,7 +1905,7 @@ func (mr *MockdatabaseShardMockRecorder) ColdFlush(flush, resources, nsCtx, onFl } // Snapshot mocks base method -func (m *MockdatabaseShard) Snapshot(blockStart, snapshotStart time0.Time, flush persist.SnapshotPreparer, nsCtx namespace.Context) error { +func (m *MockdatabaseShard) Snapshot(blockStart, snapshotStart time.Time, flush persist.SnapshotPreparer, nsCtx namespace.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Snapshot", blockStart, snapshotStart, flush, nsCtx) ret0, _ := ret[0].(error) @@ -1889,7 +1919,7 @@ func (mr *MockdatabaseShardMockRecorder) Snapshot(blockStart, snapshotStart, flu } // FlushState mocks base method -func (m *MockdatabaseShard) FlushState(blockStart time0.Time) (fileOpState, error) { +func (m *MockdatabaseShard) FlushState(blockStart time.Time) (fileOpState, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FlushState", blockStart) ret0, _ := ret[0].(fileOpState) @@ -1904,7 +1934,7 @@ func (mr *MockdatabaseShardMockRecorder) FlushState(blockStart interface{}) *gom } // CleanupExpiredFileSets mocks base method -func (m *MockdatabaseShard) CleanupExpiredFileSets(earliestToRetain time0.Time) error { +func (m *MockdatabaseShard) CleanupExpiredFileSets(earliestToRetain time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CleanupExpiredFileSets", earliestToRetain) ret0, _ := ret[0].(error) @@ -1932,7 +1962,7 @@ func (mr *MockdatabaseShardMockRecorder) CleanupCompactedFileSets() *gomock.Call } // Repair mocks base method -func (m *MockdatabaseShard) Repair(ctx context.Context, nsCtx namespace.Context, nsMeta namespace.Metadata, tr time.Range, repairer databaseShardRepairer) (repair.MetadataComparisonResult, error) { +func (m *MockdatabaseShard) Repair(ctx context.Context, nsCtx namespace.Context, nsMeta namespace.Metadata, tr time0.Range, repairer databaseShardRepairer) (repair.MetadataComparisonResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Repair", ctx, nsCtx, nsMeta, tr, repairer) ret0, _ := ret[0].(repair.MetadataComparisonResult) @@ -2034,10 +2064,10 @@ func (mr 
*MockNamespaceIndexMockRecorder) AssignShardSet(shardSet interface{}) * } // BlockStartForWriteTime mocks base method -func (m *MockNamespaceIndex) BlockStartForWriteTime(writeTime time0.Time) time.UnixNano { +func (m *MockNamespaceIndex) BlockStartForWriteTime(writeTime time.Time) time0.UnixNano { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BlockStartForWriteTime", writeTime) - ret0, _ := ret[0].(time.UnixNano) + ret0, _ := ret[0].(time0.UnixNano) return ret0 } @@ -2048,7 +2078,7 @@ func (mr *MockNamespaceIndexMockRecorder) BlockStartForWriteTime(writeTime inter } // BlockForBlockStart mocks base method -func (m *MockNamespaceIndex) BlockForBlockStart(blockStart time0.Time) (index.Block, error) { +func (m *MockNamespaceIndex) BlockForBlockStart(blockStart time.Time) (index.Block, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BlockForBlockStart", blockStart) ret0, _ := ret[0].(index.Block) @@ -2076,6 +2106,20 @@ func (mr *MockNamespaceIndexMockRecorder) WriteBatch(batch interface{}) *gomock. return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBatch", reflect.TypeOf((*MockNamespaceIndex)(nil).WriteBatch), batch) } +// WritePending mocks base method +func (m *MockNamespaceIndex) WritePending(pending []writes.PendingIndexInsert) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WritePending", pending) + ret0, _ := ret[0].(error) + return ret0 +} + +// WritePending indicates an expected call of WritePending +func (mr *MockNamespaceIndexMockRecorder) WritePending(pending interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WritePending", reflect.TypeOf((*MockNamespaceIndex)(nil).WritePending), pending) +} + // Query mocks base method func (m *MockNamespaceIndex) Query(ctx context.Context, query index.Query, opts index.QueryOptions) (index.QueryResult, error) { m.ctrl.T.Helper() @@ -2135,7 +2179,7 @@ func (mr *MockNamespaceIndexMockRecorder) BootstrapsDone() *gomock.Call { } // CleanupExpiredFileSets mocks base method -func (m *MockNamespaceIndex) CleanupExpiredFileSets(t time0.Time) error { +func (m *MockNamespaceIndex) CleanupExpiredFileSets(t time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CleanupExpiredFileSets", t) ret0, _ := ret[0].(error) @@ -2163,7 +2207,7 @@ func (mr *MockNamespaceIndexMockRecorder) CleanupDuplicateFileSets() *gomock.Cal } // Tick mocks base method -func (m *MockNamespaceIndex) Tick(c context.Cancellable, startTime time0.Time) (namespaceIndexTickResult, error) { +func (m *MockNamespaceIndex) Tick(c context.Cancellable, startTime time.Time) (namespaceIndexTickResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Tick", c, startTime) ret0, _ := ret[0].(namespaceIndexTickResult) @@ -2286,10 +2330,10 @@ func (mr *MocknamespaceIndexInsertQueueMockRecorder) Stop() *gomock.Call { } // InsertBatch mocks base method -func (m *MocknamespaceIndexInsertQueue) InsertBatch(batch *index.WriteBatch) (*sync0.WaitGroup, error) { +func (m *MocknamespaceIndexInsertQueue) InsertBatch(batch *index.WriteBatch) (*sync.WaitGroup, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "InsertBatch", batch) - ret0, _ := ret[0].(*sync0.WaitGroup) + ret0, _ := ret[0].(*sync.WaitGroup) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -2300,6 +2344,21 @@ func (mr *MocknamespaceIndexInsertQueueMockRecorder) InsertBatch(batch interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertBatch", reflect.TypeOf((*MocknamespaceIndexInsertQueue)(nil).InsertBatch), batch) } +// InsertPending mocks base method +func (m 
*MocknamespaceIndexInsertQueue) InsertPending(pending []writes.PendingIndexInsert) (*sync.WaitGroup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InsertPending", pending) + ret0, _ := ret[0].(*sync.WaitGroup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// InsertPending indicates an expected call of InsertPending +func (mr *MocknamespaceIndexInsertQueueMockRecorder) InsertPending(pending interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertPending", reflect.TypeOf((*MocknamespaceIndexInsertQueue)(nil).InsertPending), pending) +} + // MockdatabaseBootstrapManager is a mock of databaseBootstrapManager interface type MockdatabaseBootstrapManager struct { ctrl *gomock.Controller @@ -2338,10 +2397,10 @@ func (mr *MockdatabaseBootstrapManagerMockRecorder) IsBootstrapped() *gomock.Cal } // LastBootstrapCompletionTime mocks base method -func (m *MockdatabaseBootstrapManager) LastBootstrapCompletionTime() (time0.Time, bool) { +func (m *MockdatabaseBootstrapManager) LastBootstrapCompletionTime() (time.Time, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LastBootstrapCompletionTime") - ret0, _ := ret[0].(time0.Time) + ret0, _ := ret[0].(time.Time) ret1, _ := ret[1].(bool) return ret0, ret1 } @@ -2403,7 +2462,7 @@ func (m *MockdatabaseFlushManager) EXPECT() *MockdatabaseFlushManagerMockRecorde } // Flush mocks base method -func (m *MockdatabaseFlushManager) Flush(startTime time0.Time) error { +func (m *MockdatabaseFlushManager) Flush(startTime time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Flush", startTime) ret0, _ := ret[0].(error) @@ -2417,10 +2476,10 @@ func (mr *MockdatabaseFlushManagerMockRecorder) Flush(startTime interface{}) *go } // LastSuccessfulSnapshotStartTime mocks base method -func (m *MockdatabaseFlushManager) LastSuccessfulSnapshotStartTime() (time0.Time, bool) { +func (m *MockdatabaseFlushManager) LastSuccessfulSnapshotStartTime() (time.Time, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LastSuccessfulSnapshotStartTime") - ret0, _ := ret[0].(time0.Time) + ret0, _ := ret[0].(time.Time) ret1, _ := ret[1].(bool) return ret0, ret1 } @@ -2467,7 +2526,7 @@ func (m *MockdatabaseCleanupManager) EXPECT() *MockdatabaseCleanupManagerMockRec } // Cleanup mocks base method -func (m *MockdatabaseCleanupManager) Cleanup(t time0.Time, isBootstrapped bool) error { +func (m *MockdatabaseCleanupManager) Cleanup(t time.Time, isBootstrapped bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Cleanup", t, isBootstrapped) ret0, _ := ret[0].(error) @@ -2516,7 +2575,7 @@ func (m *MockdatabaseFileSystemManager) EXPECT() *MockdatabaseFileSystemManagerM } // Cleanup mocks base method -func (m *MockdatabaseFileSystemManager) Cleanup(t time0.Time, isBootstrapped bool) error { +func (m *MockdatabaseFileSystemManager) Cleanup(t time.Time, isBootstrapped bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Cleanup", t, isBootstrapped) ret0, _ := ret[0].(error) @@ -2530,7 +2589,7 @@ func (mr *MockdatabaseFileSystemManagerMockRecorder) Cleanup(t, isBootstrapped i } // Flush mocks base method -func (m *MockdatabaseFileSystemManager) Flush(t time0.Time) error { +func (m *MockdatabaseFileSystemManager) Flush(t time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Flush", t) ret0, _ := ret[0].(error) @@ -2586,7 +2645,7 @@ func (mr *MockdatabaseFileSystemManagerMockRecorder) Status() *gomock.Call { } // Run mocks base method -func (m *MockdatabaseFileSystemManager) Run(t time0.Time, runType runType, forceType 
forceType) bool { +func (m *MockdatabaseFileSystemManager) Run(t time.Time, runType runType, forceType forceType) bool { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Run", t, runType, forceType) ret0, _ := ret[0].(bool) @@ -2612,10 +2671,10 @@ func (mr *MockdatabaseFileSystemManagerMockRecorder) Report() *gomock.Call { } // LastSuccessfulSnapshotStartTime mocks base method -func (m *MockdatabaseFileSystemManager) LastSuccessfulSnapshotStartTime() (time0.Time, bool) { +func (m *MockdatabaseFileSystemManager) LastSuccessfulSnapshotStartTime() (time.Time, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LastSuccessfulSnapshotStartTime") - ret0, _ := ret[0].(time0.Time) + ret0, _ := ret[0].(time.Time) ret1, _ := ret[1].(bool) return ret0, ret1 } @@ -2664,7 +2723,7 @@ func (mr *MockdatabaseShardRepairerMockRecorder) Options() *gomock.Call { } // Repair mocks base method -func (m *MockdatabaseShardRepairer) Repair(ctx context.Context, nsCtx namespace.Context, nsMeta namespace.Metadata, tr time.Range, shard databaseShard) (repair.MetadataComparisonResult, error) { +func (m *MockdatabaseShardRepairer) Repair(ctx context.Context, nsCtx namespace.Context, nsMeta namespace.Metadata, tr time0.Range, shard databaseShard) (repair.MetadataComparisonResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Repair", ctx, nsCtx, nsMeta, tr, shard) ret0, _ := ret[0].(repair.MetadataComparisonResult) @@ -2775,7 +2834,7 @@ func (m *MockdatabaseTickManager) EXPECT() *MockdatabaseTickManagerMockRecorder } // Tick mocks base method -func (m *MockdatabaseTickManager) Tick(forceType forceType, startTime time0.Time) error { +func (m *MockdatabaseTickManager) Tick(forceType forceType, startTime time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Tick", forceType, startTime) ret0, _ := ret[0].(error) @@ -2840,10 +2899,10 @@ func (mr *MockdatabaseMediatorMockRecorder) IsBootstrapped() *gomock.Call { } // LastBootstrapCompletionTime mocks base method -func (m *MockdatabaseMediator) LastBootstrapCompletionTime() (time0.Time, bool) { +func (m *MockdatabaseMediator) LastBootstrapCompletionTime() (time.Time, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LastBootstrapCompletionTime") - ret0, _ := ret[0].(time0.Time) + ret0, _ := ret[0].(time.Time) ret1, _ := ret[1].(bool) return ret0, ret1 } @@ -2894,7 +2953,7 @@ func (mr *MockdatabaseMediatorMockRecorder) EnableFileOps() *gomock.Call { } // Tick mocks base method -func (m *MockdatabaseMediator) Tick(forceType forceType, startTime time0.Time) error { +func (m *MockdatabaseMediator) Tick(forceType forceType, startTime time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Tick", forceType, startTime) ret0, _ := ret[0].(error) @@ -2960,10 +3019,10 @@ func (mr *MockdatabaseMediatorMockRecorder) Report() *gomock.Call { } // LastSuccessfulSnapshotStartTime mocks base method -func (m *MockdatabaseMediator) LastSuccessfulSnapshotStartTime() (time0.Time, bool) { +func (m *MockdatabaseMediator) LastSuccessfulSnapshotStartTime() (time.Time, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LastSuccessfulSnapshotStartTime") - ret0, _ := ret[0].(time0.Time) + ret0, _ := ret[0].(time.Time) ret1, _ := ret[1].(bool) return ret0, ret1 } @@ -3283,7 +3342,7 @@ func (mr *MockOptionsMockRecorder) RuntimeOptionsManager() *gomock.Call { } // SetErrorWindowForLoad mocks base method -func (m *MockOptions) SetErrorWindowForLoad(value time0.Duration) Options { +func (m *MockOptions) SetErrorWindowForLoad(value time.Duration) Options { m.ctrl.T.Helper() ret := m.ctrl.Call(m, 
"SetErrorWindowForLoad", value) ret0, _ := ret[0].(Options) @@ -3297,10 +3356,10 @@ func (mr *MockOptionsMockRecorder) SetErrorWindowForLoad(value interface{}) *gom } // ErrorWindowForLoad mocks base method -func (m *MockOptions) ErrorWindowForLoad() time0.Duration { +func (m *MockOptions) ErrorWindowForLoad() time.Duration { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ErrorWindowForLoad") - ret0, _ := ret[0].(time0.Duration) + ret0, _ := ret[0].(time.Duration) return ret0 } @@ -3899,7 +3958,7 @@ func (mr *MockOptionsMockRecorder) FetchBlocksMetadataResultsPool() *gomock.Call } // SetQueryIDsWorkerPool mocks base method -func (m *MockOptions) SetQueryIDsWorkerPool(value sync.WorkerPool) Options { +func (m *MockOptions) SetQueryIDsWorkerPool(value sync0.WorkerPool) Options { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetQueryIDsWorkerPool", value) ret0, _ := ret[0].(Options) @@ -3913,10 +3972,10 @@ func (mr *MockOptionsMockRecorder) SetQueryIDsWorkerPool(value interface{}) *gom } // QueryIDsWorkerPool mocks base method -func (m *MockOptions) QueryIDsWorkerPool() sync.WorkerPool { +func (m *MockOptions) QueryIDsWorkerPool() sync0.WorkerPool { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "QueryIDsWorkerPool") - ret0, _ := ret[0].(sync.WorkerPool) + ret0, _ := ret[0].(sync0.WorkerPool) return ret0 } diff --git a/src/dbnode/ts/writes/write_batch_mock.go b/src/dbnode/ts/writes/write_batch_mock.go new file mode 100644 index 0000000000..b4c6a15c1c --- /dev/null +++ b/src/dbnode/ts/writes/write_batch_mock.go @@ -0,0 +1,300 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/m3db/m3/src/dbnode/ts/writes/types.go + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package writes is a generated GoMock package. 
+package writes + +import ( + "reflect" + "time" + + "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/x/ident" + time0 "github.com/m3db/m3/src/x/time" + + "github.com/golang/mock/gomock" +) + +// MockWriteBatch is a mock of WriteBatch interface +type MockWriteBatch struct { + ctrl *gomock.Controller + recorder *MockWriteBatchMockRecorder +} + +// MockWriteBatchMockRecorder is the mock recorder for MockWriteBatch +type MockWriteBatchMockRecorder struct { + mock *MockWriteBatch +} + +// NewMockWriteBatch creates a new mock instance +func NewMockWriteBatch(ctrl *gomock.Controller) *MockWriteBatch { + mock := &MockWriteBatch{ctrl: ctrl} + mock.recorder = &MockWriteBatchMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockWriteBatch) EXPECT() *MockWriteBatchMockRecorder { + return m.recorder +} + +// Add mocks base method +func (m *MockWriteBatch) Add(originalIndex int, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Add", originalIndex, id, timestamp, value, unit, annotation) + ret0, _ := ret[0].(error) + return ret0 +} + +// Add indicates an expected call of Add +func (mr *MockWriteBatchMockRecorder) Add(originalIndex, id, timestamp, value, unit, annotation interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockWriteBatch)(nil).Add), originalIndex, id, timestamp, value, unit, annotation) +} + +// AddTagged mocks base method +func (m *MockWriteBatch) AddTagged(originalIndex int, id ident.ID, tags ident.TagIterator, encodedTags ts.EncodedTags, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddTagged", originalIndex, id, tags, encodedTags, timestamp, value, unit, annotation) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddTagged indicates an expected call of AddTagged +func (mr *MockWriteBatchMockRecorder) AddTagged(originalIndex, id, tags, encodedTags, timestamp, value, unit, annotation interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTagged", reflect.TypeOf((*MockWriteBatch)(nil).AddTagged), originalIndex, id, tags, encodedTags, timestamp, value, unit, annotation) +} + +// SetFinalizeEncodedTagsFn mocks base method +func (m *MockWriteBatch) SetFinalizeEncodedTagsFn(f FinalizeEncodedTagsFn) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetFinalizeEncodedTagsFn", f) +} + +// SetFinalizeEncodedTagsFn indicates an expected call of SetFinalizeEncodedTagsFn +func (mr *MockWriteBatchMockRecorder) SetFinalizeEncodedTagsFn(f interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFinalizeEncodedTagsFn", reflect.TypeOf((*MockWriteBatch)(nil).SetFinalizeEncodedTagsFn), f) +} + +// SetFinalizeAnnotationFn mocks base method +func (m *MockWriteBatch) SetFinalizeAnnotationFn(f FinalizeAnnotationFn) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetFinalizeAnnotationFn", f) +} + +// SetFinalizeAnnotationFn indicates an expected call of SetFinalizeAnnotationFn +func (mr *MockWriteBatchMockRecorder) SetFinalizeAnnotationFn(f interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFinalizeAnnotationFn", reflect.TypeOf((*MockWriteBatch)(nil).SetFinalizeAnnotationFn), f) +} + +// Iter mocks base method +func (m 
*MockWriteBatch) Iter() []BatchWrite { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Iter") + ret0, _ := ret[0].([]BatchWrite) + return ret0 +} + +// Iter indicates an expected call of Iter +func (mr *MockWriteBatchMockRecorder) Iter() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iter", reflect.TypeOf((*MockWriteBatch)(nil).Iter)) +} + +// SetPendingIndex mocks base method +func (m *MockWriteBatch) SetPendingIndex(idx int, pending PendingIndexInsert) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetPendingIndex", idx, pending) +} + +// SetPendingIndex indicates an expected call of SetPendingIndex +func (mr *MockWriteBatchMockRecorder) SetPendingIndex(idx, pending interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPendingIndex", reflect.TypeOf((*MockWriteBatch)(nil).SetPendingIndex), idx, pending) +} + +// PendingIndex mocks base method +func (m *MockWriteBatch) PendingIndex() []PendingIndexInsert { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PendingIndex") + ret0, _ := ret[0].([]PendingIndexInsert) + return ret0 +} + +// PendingIndex indicates an expected call of PendingIndex +func (mr *MockWriteBatchMockRecorder) PendingIndex() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PendingIndex", reflect.TypeOf((*MockWriteBatch)(nil).PendingIndex)) +} + +// SetError mocks base method +func (m *MockWriteBatch) SetError(idx int, err error) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetError", idx, err) +} + +// SetError indicates an expected call of SetError +func (mr *MockWriteBatchMockRecorder) SetError(idx, err interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetError", reflect.TypeOf((*MockWriteBatch)(nil).SetError), idx, err) +} + +// SetSeries mocks base method +func (m *MockWriteBatch) SetSeries(idx int, series ts.Series) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSeries", idx, series) +} + +// SetSeries indicates an expected call of SetSeries +func (mr *MockWriteBatchMockRecorder) SetSeries(idx, series interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSeries", reflect.TypeOf((*MockWriteBatch)(nil).SetSeries), idx, series) +} + +// SetSkipWrite mocks base method +func (m *MockWriteBatch) SetSkipWrite(idx int) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSkipWrite", idx) +} + +// SetSkipWrite indicates an expected call of SetSkipWrite +func (mr *MockWriteBatchMockRecorder) SetSkipWrite(idx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSkipWrite", reflect.TypeOf((*MockWriteBatch)(nil).SetSkipWrite), idx) +} + +// Reset mocks base method +func (m *MockWriteBatch) Reset(batchSize int, ns ident.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Reset", batchSize, ns) +} + +// Reset indicates an expected call of Reset +func (mr *MockWriteBatchMockRecorder) Reset(batchSize, ns interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockWriteBatch)(nil).Reset), batchSize, ns) +} + +// Finalize mocks base method +func (m *MockWriteBatch) Finalize() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Finalize") +} + +// Finalize indicates an expected call of Finalize +func (mr *MockWriteBatchMockRecorder) Finalize() *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Finalize", reflect.TypeOf((*MockWriteBatch)(nil).Finalize)) +} + +// cap mocks base method +func (m *MockWriteBatch) cap() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "cap") + ret0, _ := ret[0].(int) + return ret0 +} + +// cap indicates an expected call of cap +func (mr *MockWriteBatchMockRecorder) cap() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "cap", reflect.TypeOf((*MockWriteBatch)(nil).cap)) +} + +// MockBatchWriter is a mock of BatchWriter interface +type MockBatchWriter struct { + ctrl *gomock.Controller + recorder *MockBatchWriterMockRecorder +} + +// MockBatchWriterMockRecorder is the mock recorder for MockBatchWriter +type MockBatchWriterMockRecorder struct { + mock *MockBatchWriter +} + +// NewMockBatchWriter creates a new mock instance +func NewMockBatchWriter(ctrl *gomock.Controller) *MockBatchWriter { + mock := &MockBatchWriter{ctrl: ctrl} + mock.recorder = &MockBatchWriterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockBatchWriter) EXPECT() *MockBatchWriterMockRecorder { + return m.recorder +} + +// Add mocks base method +func (m *MockBatchWriter) Add(originalIndex int, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Add", originalIndex, id, timestamp, value, unit, annotation) + ret0, _ := ret[0].(error) + return ret0 +} + +// Add indicates an expected call of Add +func (mr *MockBatchWriterMockRecorder) Add(originalIndex, id, timestamp, value, unit, annotation interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockBatchWriter)(nil).Add), originalIndex, id, timestamp, value, unit, annotation) +} + +// AddTagged mocks base method +func (m *MockBatchWriter) AddTagged(originalIndex int, id ident.ID, tags ident.TagIterator, encodedTags ts.EncodedTags, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddTagged", originalIndex, id, tags, encodedTags, timestamp, value, unit, annotation) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddTagged indicates an expected call of AddTagged +func (mr *MockBatchWriterMockRecorder) AddTagged(originalIndex, id, tags, encodedTags, timestamp, value, unit, annotation interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTagged", reflect.TypeOf((*MockBatchWriter)(nil).AddTagged), originalIndex, id, tags, encodedTags, timestamp, value, unit, annotation) +} + +// SetFinalizeEncodedTagsFn mocks base method +func (m *MockBatchWriter) SetFinalizeEncodedTagsFn(f FinalizeEncodedTagsFn) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetFinalizeEncodedTagsFn", f) +} + +// SetFinalizeEncodedTagsFn indicates an expected call of SetFinalizeEncodedTagsFn +func (mr *MockBatchWriterMockRecorder) SetFinalizeEncodedTagsFn(f interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFinalizeEncodedTagsFn", reflect.TypeOf((*MockBatchWriter)(nil).SetFinalizeEncodedTagsFn), f) +} + +// SetFinalizeAnnotationFn mocks base method +func (m *MockBatchWriter) SetFinalizeAnnotationFn(f FinalizeAnnotationFn) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetFinalizeAnnotationFn", f) +} + +// SetFinalizeAnnotationFn indicates an expected call of 
SetFinalizeAnnotationFn +func (mr *MockBatchWriterMockRecorder) SetFinalizeAnnotationFn(f interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFinalizeAnnotationFn", reflect.TypeOf((*MockBatchWriter)(nil).SetFinalizeAnnotationFn), f) +} diff --git a/src/dbnode/x/xio/io_mock.go b/src/dbnode/x/xio/io_mock.go index 6005d38e3b..daa99ba481 100644 --- a/src/dbnode/x/xio/io_mock.go +++ b/src/dbnode/x/xio/io_mock.go @@ -1,6 +1,26 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/m3db/m3/src/dbnode/x/xio (interfaces: SegmentReader,SegmentReaderPool) +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + // Package xio is a generated GoMock package. 
package xio

From f9a063d35a321e797194fc3fca5a9c45b6d4428f Mon Sep 17 00:00:00 2001
From: Rob Skillington
Date: Sun, 21 Jun 2020 19:11:47 -0400
Subject: [PATCH 10/37] Remove debug sleep

---
 src/dbnode/integration/write_quorum_test.go | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/src/dbnode/integration/write_quorum_test.go b/src/dbnode/integration/write_quorum_test.go
index b4a08de153..82f7cd96ec 100644
--- a/src/dbnode/integration/write_quorum_test.go
+++ b/src/dbnode/integration/write_quorum_test.go
@@ -214,9 +214,6 @@ func TestAddNodeQuorumAllUp(t *testing.T) {
 	assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
 	assert.NoError(t, testWrite(topology.ConsistencyLevelMajority))
 	assert.Error(t, testWrite(topology.ConsistencyLevelAll))
-
-	// debug
-	time.Sleep(10 * time.Minute)
 }
 
 type testWriteFn func(topology.ConsistencyLevel) error

From 0b198d9bdd8cb7feaf6431d4fb044fcd7fc5cc1f Mon Sep 17 00:00:00 2001
From: Rob Skillington
Date: Sun, 21 Jun 2020 19:21:58 -0400
Subject: [PATCH 11/37] Fix tag slice duplicate

---
 src/x/ident/tag_iterator.go | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/src/x/ident/tag_iterator.go b/src/x/ident/tag_iterator.go
index 760c75b377..f51508b0c3 100644
--- a/src/x/ident/tag_iterator.go
+++ b/src/x/ident/tag_iterator.go
@@ -177,17 +177,13 @@ func (i *tagSliceIter) Duplicate() TagIterator {
 		} else {
 			iter.ResetFields(i.backingSlice.fields)
 		}
-
 		for j := 0; j <= i.currentIdx; j++ {
 			iter.Next()
 		}
 		return iter
 	}
-	return &tagSliceIter{
-		backingSlice: i.backingSlice,
-		currentIdx:   i.currentIdx,
-		currentTag:   i.currentTag,
-	}
+	return newTagSliceIter(Tags{values: i.backingSlice.tags},
+		i.backingSlice.fields, i.pool)
 }
 
 func (i *tagSliceIter) rewind() {

From e0aef8346def51f7ac40d12d7719853bbbb054e2 Mon Sep 17 00:00:00 2001
From: Rob Skillington
Date: Sun, 21 Jun 2020 19:30:14 -0400
Subject: [PATCH 12/37] Fix pending index insert

---
 src/dbnode/storage/shard.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/dbnode/storage/shard.go b/src/dbnode/storage/shard.go
index c680de0020..956c2151c1 100644
--- a/src/dbnode/storage/shard.go
+++ b/src/dbnode/storage/shard.go
@@ -981,7 +981,7 @@ func (s *dbShard) writeAndIndex(
 			return SeriesWrite{}, fmt.Errorf("to index async need write new series to be enable")
 		}
 		needsIndex = true
-		pendingIndexInsert = s.pendingIndexInsert(entry, timestamp)
+		pendingIndexInsert = s.pendingIndexInsert(result.entry, timestamp)
 	}
 
 	// NB(r): Make sure to use the copied ID which will eventually

From b028037c0a997e9bdd67184b1731138dabc4625a Mon Sep 17 00:00:00 2001
From: Rob Skillington
Date: Sun, 21 Jun 2020 19:55:04 -0400
Subject: [PATCH 13/37] Only encode tags if set

---
 .../integration/integration_data_verify.go |  2 ++
 src/dbnode/persist/fs/write.go              | 23 +++++++++++--------
 2 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/src/dbnode/integration/integration_data_verify.go b/src/dbnode/integration/integration_data_verify.go
index 9eb4c6a638..e889b596c1 100644
--- a/src/dbnode/integration/integration_data_verify.go
+++ b/src/dbnode/integration/integration_data_verify.go
@@ -202,6 +202,8 @@ func verifySeriesMapForRange(
 				zap.String("id", id),
 				zap.String("expectedTags", expected),
 				zap.String("actualTags", actual),
+				zap.Any("expectedTagsErr", expectedTagsIter.Err()),
+				zap.Any("actualTagsErr", actualTagsIter.Err()),
 			)
 		}
 
diff --git a/src/dbnode/persist/fs/write.go b/src/dbnode/persist/fs/write.go
index 5d9dc42f0c..be0ea86254 100644
--- a/src/dbnode/persist/fs/write.go
+++ 
b/src/dbnode/persist/fs/write.go @@ -466,14 +466,19 @@ func (w *writer) writeIndexFileContents( return err } - tagsEncoder.Reset() - if err := tagsEncoder.Encode(tagsIter); err != nil { - return err - } - - encodedTags, ok := tagsEncoder.Data() - if !ok { - return errWriterEncodeTagsDataNotAccessible + var encodedTags []byte + if numTags := tagsIter.Remaining(); numTags > 0 { + tagsEncoder.Reset() + if err := tagsEncoder.Encode(tagsIter); err != nil { + return err + } + + encodedTagsData, ok := tagsEncoder.Data() + if !ok { + return errWriterEncodeTagsDataNotAccessible + } + + encodedTags = encodedTagsData.Bytes() } entry := schema.IndexEntry{ @@ -482,7 +487,7 @@ func (w *writer) writeIndexFileContents( Size: int64(w.indexEntries[i].size), Offset: w.indexEntries[i].dataFileOffset, Checksum: int64(w.indexEntries[i].checksum), - EncodedTags: encodedTags.Bytes(), + EncodedTags: encodedTags, } w.encoder.Reset() From d6d4dc791c0b20ec4720ac1419b6daf8766cf652 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Sun, 21 Jun 2020 21:57:04 -0400 Subject: [PATCH 14/37] Fix rewind --- src/dbnode/persist/fs/index_lookup_prop_test.go | 5 ++++- src/x/serialize/decoder.go | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/dbnode/persist/fs/index_lookup_prop_test.go b/src/dbnode/persist/fs/index_lookup_prop_test.go index 97e40bafc7..f8f3ba7bf5 100644 --- a/src/dbnode/persist/fs/index_lookup_prop_test.go +++ b/src/dbnode/persist/fs/index_lookup_prop_test.go @@ -32,6 +32,7 @@ import ( "time" "github.com/m3db/m3/src/dbnode/digest" + "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/persist/fs/msgpack" "github.com/m3db/m3/src/x/checked" "github.com/m3db/m3/src/x/ident" @@ -164,7 +165,9 @@ func calculateExpectedChecksum(t *testing.T, filePath string) uint32 { func writeTestSummariesData(w DataFileSetWriter, writes []generatedWrite) error { for _, write := range writes { - err := w.Write(write.id, write.tags, write.data, write.checksum) + metadata := persist.NewMetadataSeriesIDAndTags(write.id, write.tags, + persist.MetadataOptions{}) + err := w.Write(metadata, write.data, write.checksum) if err != nil { return err } diff --git a/src/x/serialize/decoder.go b/src/x/serialize/decoder.go index 042de88da3..9d1b5d9dce 100644 --- a/src/x/serialize/decoder.go +++ b/src/x/serialize/decoder.go @@ -239,6 +239,9 @@ func (d *decoder) Duplicate() ident.TagIterator { } func (d *decoder) Rewind() { + if d.checkedData == nil { + return + } d.checkedData.IncRef() d.Reset(d.checkedData) d.checkedData.DecRef() From 487c4e1e15dee0b23a5861a750e3a3ed4e8b61db Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Sun, 21 Jun 2020 22:54:42 -0400 Subject: [PATCH 15/37] Revert to using duplicate for newShardEntry --- src/dbnode/storage/shard.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/dbnode/storage/shard.go b/src/dbnode/storage/shard.go index 956c2151c1..780ca81060 100644 --- a/src/dbnode/storage/shard.go +++ b/src/dbnode/storage/shard.go @@ -1173,8 +1173,7 @@ func (s *dbShard) newShardEntry( switch tagsArgOpts.arg { case tagsIterArg: // NB(r): Rewind so we record the tag iterator from the beginning. - tagsIter := tagsArgOpts.tagsIter - tagsIter.Rewind() + tagsIter := tagsArgOpts.tagsIter.Duplicate() // Pass nil for the identifier pool because the pool will force us to use an array // with a large capacity to store the tags. 
Since these tags are long-lived, it's From 6d73d82f40f69074f89a4742ce9a389970772f7f Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Sun, 21 Jun 2020 23:27:15 -0400 Subject: [PATCH 16/37] Add reminder about bootstrap path --- src/dbnode/storage/shard.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/dbnode/storage/shard.go b/src/dbnode/storage/shard.go index 780ca81060..eaa7a27b22 100644 --- a/src/dbnode/storage/shard.go +++ b/src/dbnode/storage/shard.go @@ -1028,6 +1028,12 @@ func (s *dbShard) SeriesReadWriteRef( }, nil } + // BEFORE MERGE: CONSIDER BELOW FOR NOW OR FOLLOWUP + // TODO(r): Probably can't insert series sync otherwise we stall a ton + // of writes... need a better solution for bootstrapping. + // This is what causes writes to degrade during bootstrap. + // This is a note to consider before merging this PR. + // NB(r): Insert synchronously so caller has access to the series // immediately, otherwise calls to LoadBlock(..) etc on the series itself // may have no effect if a collision with the same series From 7687c95c7895709ff7cbd67fc497771cd07c7a67 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Sun, 21 Jun 2020 23:53:19 -0400 Subject: [PATCH 17/37] Update dashboard --- integrations/grafana/m3db_dashboard.json | 436 ++++++++++++++++++----- 1 file changed, 346 insertions(+), 90 deletions(-) diff --git a/integrations/grafana/m3db_dashboard.json b/integrations/grafana/m3db_dashboard.json index 1e316f76ab..135391e4e0 100644 --- a/integrations/grafana/m3db_dashboard.json +++ b/integrations/grafana/m3db_dashboard.json @@ -41,8 +41,7 @@ "editable": true, "gnetId": null, "graphTooltip": 1, - "id": null, - "iteration": 1538144350249, + "iteration": 1592797277408, "links": [], "panels": [ { @@ -104,6 +103,7 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, + "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -121,7 +121,7 @@ "lineColor": "rgb(31, 120, 193)", "show": true }, - "tableColumn": "Value", + "tableColumn": "", "targets": [ { "expr": "sum(database_bootstrapped{instance=~\"$instance\"} == bool 1)", @@ -146,7 +146,10 @@ } ], "valueName": "current", - "y_formats": ["short", "short"] + "y_formats": [ + "short", + "short" + ] }, { "cacheTimeout": null, @@ -193,6 +196,7 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, + "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -210,7 +214,7 @@ "lineColor": "rgb(31, 120, 193)", "show": true }, - "tableColumn": "Value", + "tableColumn": "", "targets": [ { "expr": "sum(database_bootstrapped{instance=~\"$instance\"} == bool 0)", @@ -234,7 +238,10 @@ } ], "valueName": "current", - "y_formats": ["short", "short"] + "y_formats": [ + "short", + "short" + ] }, { "cacheTimeout": null, @@ -281,6 +288,7 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, + "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -298,7 +306,7 @@ "lineColor": "rgb(31, 120, 193)", "show": true }, - "tableColumn": "revision", + "tableColumn": "", "targets": [ { "expr": "count(build_information{instance=~\"$instance\"}) by (revision)", @@ -322,7 +330,10 @@ } ], "valueName": "current", - "y_formats": ["short", "short"] + "y_formats": [ + "short", + "short" + ] }, { "cacheTimeout": null, @@ -369,6 +380,7 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, + "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -386,7 +398,7 @@ "lineColor": "rgb(31, 120, 193)", "show": true }, - 
"tableColumn": "go_version", + "tableColumn": "", "targets": [ { "expr": "count(build_information{instance=~\"$instance\"}) by (go_version)", @@ -410,7 +422,10 @@ } ], "valueName": "current", - "y_formats": ["short", "short"] + "y_formats": [ + "short", + "short" + ] }, { "collapsed": false, @@ -435,6 +450,7 @@ "editable": true, "error": false, "fill": 0, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, @@ -442,6 +458,7 @@ "x": 0, "y": 5 }, + "hiddenSeries": false, "id": 1, "legend": { "avg": false, @@ -456,6 +473,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -483,6 +503,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Commit Log Queue Length", "tooltip": { @@ -529,6 +550,7 @@ "editable": true, "error": false, "fill": 0, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, @@ -536,6 +558,7 @@ "x": 8, "y": 5 }, + "hiddenSeries": false, "id": 2, "legend": { "avg": false, @@ -550,6 +573,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -578,6 +604,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Commit Log Writes / Second (Includes replication)", "tooltip": { @@ -624,6 +651,7 @@ "editable": true, "error": false, "fill": 0, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, @@ -631,6 +659,7 @@ "x": 16, "y": 5 }, + "hiddenSeries": false, "id": 45, "legend": { "avg": false, @@ -645,6 +674,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -667,6 +699,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "New Series Inserts / Second", "tooltip": { @@ -1034,6 +1067,7 @@ "editable": true, "error": false, "fill": 0, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, @@ -1041,6 +1075,7 @@ "x": 0, "y": 14 }, + "hiddenSeries": false, "id": 81, "legend": { "avg": false, @@ -1055,6 +1090,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -1114,6 +1152,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Service Reads / Writes / Second", "tooltip": { @@ -1160,6 +1199,7 @@ "editable": true, "error": false, "fill": 0, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, @@ -1167,6 +1207,7 @@ "x": 8, "y": 14 }, + "hiddenSeries": false, "id": 82, "legend": { "avg": false, @@ -1181,6 +1222,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -1200,6 +1244,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Server Side Write Latency (p99)", "tooltip": { @@ -1246,6 +1291,7 @@ "editable": true, "error": false, "fill": 0, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, @@ -1253,6 +1299,7 @@ "x": 16, "y": 14 }, + "hiddenSeries": false, "id": 83, "legend": { "avg": false, @@ -1267,6 +1314,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -1287,6 +1337,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, 
"title": "Server Side Fetch Latency (p99)", "tooltip": { @@ -1347,6 +1398,7 @@ "editable": true, "error": false, "fill": 0, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, @@ -1354,6 +1406,7 @@ "x": 0, "y": 22 }, + "hiddenSeries": false, "id": 13, "legend": { "avg": false, @@ -1368,6 +1421,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -1389,6 +1445,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Process CPU Seconds", "tooltip": { @@ -1434,6 +1491,7 @@ "editable": true, "error": false, "fill": 0, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, @@ -1441,6 +1499,7 @@ "x": 8, "y": 22 }, + "hiddenSeries": false, "id": 14, "legend": { "avg": false, @@ -1455,6 +1514,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -1487,6 +1549,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Memory Utilization - Resident", "tooltip": { @@ -1532,6 +1595,7 @@ "editable": true, "error": false, "fill": 0, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, @@ -1539,6 +1603,7 @@ "x": 16, "y": 22 }, + "hiddenSeries": false, "id": 110, "legend": { "avg": false, @@ -1553,6 +1618,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -1575,6 +1643,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Memory - Mmaps (Anon and File)", "tooltip": { @@ -1620,6 +1689,7 @@ "editable": true, "error": false, "fill": 0, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, @@ -1627,6 +1697,7 @@ "x": 0, "y": 29 }, + "hiddenSeries": false, "id": 61, "legend": { "avg": false, @@ -1641,6 +1712,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -1661,6 +1735,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Num File Descriptors", "tooltip": { @@ -1706,6 +1781,7 @@ "editable": true, "error": false, "fill": 0, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, @@ -1713,6 +1789,7 @@ "x": 8, "y": 29 }, + "hiddenSeries": false, "id": 49, "legend": { "avg": false, @@ -1727,6 +1804,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -1747,6 +1827,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Num Go Routines", "tooltip": { @@ -1792,6 +1873,7 @@ "editable": true, "error": false, "fill": 0, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, @@ -1799,6 +1881,7 @@ "x": 16, "y": 29 }, + "hiddenSeries": false, "id": 39, "legend": { "avg": false, @@ -1813,6 +1896,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -1832,6 +1918,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Disk Free Space - Not Implemented", "tooltip": { @@ -4384,7 +4471,7 @@ "format": "time_series", "intervalFactor": 1, "key": 0.2814048282536148, - "refId": "B" + "refId": "A" } ], "thresholds": [], @@ -5126,11 +5213,11 @@ "steppedLine": false, 
"targets": [ { - "expr": "sum(build_information{}) by (build_version, revision)", + "expr": "sum(build_information{}) by (revision)", "format": "time_series", "intervalFactor": 1, "key": 0.5783520603949805, - "legendFormat": "{{build_version}} ({{revision}})", + "legendFormat": "{{revision}}", "refId": "A" } ], @@ -5194,12 +5281,14 @@ "dashes": false, "datasource": "$datasource", "fill": 1, + "fillGradient": 0, "gridPos": { "h": 7, "w": 8, "x": 0, "y": 46 }, + "hiddenSeries": false, "id": 69, "legend": { "avg": false, @@ -5214,6 +5303,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -5234,10 +5326,17 @@ "hide": false, "intervalFactor": 1, "refId": "A" + }, + { + "expr": "rate(dbindex_index_error{instance=~\"$instance\"}[$step])", + "format": "time_series", + "intervalFactor": 1, + "refId": "B" } ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Index Queue", "tooltip": { @@ -5288,30 +5387,36 @@ "h": 7, "w": 8, "x": 8, - "y": 105 + "y": 46 }, "hiddenSeries": false, - "id": 116, + "id": 111, "legend": { "avg": false, "current": false, "max": false, "min": false, - "show": true, + "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, + "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, - "pointradius": 2, + "pointradius": 5, "points": false, "renderer": "flot", - "seriesOverrides": [], + "seriesOverrides": [ + { + "alias": "name:index-error error_type:async-insert | sum", + "yaxis": 2 + } + ], "spaceLength": 10, "stack": false, "steppedLine": false, @@ -5319,6 +5424,7 @@ { "expr": "rate(dbindex_index_error{instance=~\"$instance\"}[$step])", "format": "time_series", + "hide": false, "intervalFactor": 1, "refId": "A" } @@ -5327,7 +5433,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Index Errors", + "title": "Indexing Errors", "tooltip": { "shared": true, "sort": 0, @@ -5370,16 +5476,16 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", - "fill": 0, + "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 8, "x": 16, - "y": 105 + "y": 46 }, "hiddenSeries": false, - "id": 79, + "id": 112, "legend": { "avg": false, "current": false, @@ -5400,19 +5506,27 @@ "pointradius": 5, "points": false, "renderer": "flot", - "seriesOverrides": [], + "seriesOverrides": [ + { + "alias": "name:index-error error_type:async-insert | sum", + "yaxis": 2 + } + ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "dbindex_insert_end_to_end_latency{instance=~\"$instance\",quantile=\"0.99\"}", + "expr": "rate(dbshard_insert_async_errors{instance=~\"$instance\"}[$step])", "format": "time_series", + "hide": false, "intervalFactor": 1, "refId": "A" }, { - "expr": "histogram_quantile(0.99, sum(rate(dbindex_insert_end_to_end_latency_bucket{instance=~\"$instance\"}[$step])) by (le, instance)) ", + "expr": "rate(dbshard_insert_queue_inserts_batch_errors{instance=~\"$instance\"}[$step])", + "format": "time_series", + "intervalFactor": 1, "refId": "B" } ], @@ -5420,9 +5534,9 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Indexing End-to-End Latency - P99", + "title": "Shard Insert Errors", "tooltip": { - "shared": false, + "shared": true, "sort": 0, "value_type": "individual" }, @@ -5436,7 +5550,7 @@ }, "yaxes": [ { - "format": "s", + "format": "short", "label": null, "logBase": 1, "max": null, @@ -5463,33 +5577,34 @@ 
"dashLength": 10, "dashes": false, "datasource": "$datasource", - "fill": 1, + "fill": 0, "fillGradient": 0, "gridPos": { "h": 7, "w": 8, "x": 0, - "y": 112 + "y": 53 }, "hiddenSeries": false, - "id": 114, + "id": 79, "legend": { "avg": false, "current": false, "max": false, "min": false, - "show": true, + "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, + "links": [], "nullPointMode": "null", "options": { "dataLinks": [] }, "percentage": false, - "pointradius": 2, + "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], @@ -5498,15 +5613,13 @@ "steppedLine": false, "targets": [ { - "expr": "rate(dbshard_insert_async_errors{instance=~\"$instance\"}[$step])", + "expr": "dbindex_insert_end_to_end_latency{instance=~\"$instance\",quantile=\"0.99\"}", "format": "time_series", "intervalFactor": 1, "refId": "A" }, { - "expr": "rate(dbshard_insert_queue_inserts_batch_errors{instance=~\"$instance\"}[$step])", - "format": "time_series", - "intervalFactor": 1, + "expr": "histogram_quantile(0.99, sum(rate(dbindex_insert_end_to_end_latency_bucket{instance=~\"$instance\"}[$step])) by (le, instance)) ", "refId": "B" } ], @@ -5514,9 +5627,9 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Shard Insert Errors", + "title": "Indexing End-to-End Latency - P99", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -5530,7 +5643,7 @@ }, "yaxes": [ { - "format": "short", + "format": "s", "label": null, "logBase": 1, "max": null, @@ -5550,22 +5663,7 @@ "align": false, "alignLevel": null } - } - ], - "repeat": null, - "title": "Index Queue", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 46 - }, - "id": 108, - "panels": [ + }, { "aliasColors": {}, "bars": false, @@ -5573,13 +5671,15 @@ "dashes": false, "datasource": "$datasource", "fill": 0, + "fillGradient": 0, "gridPos": { "h": 7, - "w": 12, - "x": 0, - "y": 47 + "w": 8, + "x": 8, + "y": 53 }, - "id": 85, + "hiddenSeries": false, + "id": 113, "legend": { "avg": false, "current": false, @@ -5593,6 +5693,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -5603,18 +5706,20 @@ "steppedLine": false, "targets": [ { - "expr": "database_tick_index_num_docs{instance=~\"$instance\"}", + "expr": "index_block_compaction_task_run_latency{instance=~\"$instance\",quantile=\"0.99\",compaction_type=\"foreground\"}", "format": "time_series", "intervalFactor": 1, + "legendFormat": "{{compaction_type}} {{instance}} p{{quantile}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, - "title": "Num Docs", + "title": "Indexing foreground compactions", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -5628,7 +5733,7 @@ }, "yaxes": [ { - "format": "short", + "format": "s", "label": null, "logBase": 1, "max": null, @@ -5656,13 +5761,15 @@ "dashes": false, "datasource": "$datasource", "fill": 0, + "fillGradient": 0, "gridPos": { "h": 7, - "w": 12, - "x": 12, - "y": 47 + "w": 8, + "x": 16, + "y": 53 }, - "id": 86, + "hiddenSeries": false, + "id": 114, "legend": { "avg": false, "current": false, @@ -5676,6 +5783,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -5686,18 +5796,20 @@ "steppedLine": false, "targets": [ { - "expr": 
"database_tick_index_num_segments{instance=~\"$instance\"}", + "expr": "index_block_compaction_task_run_latency{instance=~\"$instance\",quantile=\"0.99\",compaction_type=\"background\"}", "format": "time_series", "intervalFactor": 1, + "legendFormat": "{{compaction_type}} {{instance}} p{{quantile}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, - "title": "Num Segments", + "title": "Indexing background compactions", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -5711,7 +5823,7 @@ }, "yaxes": [ { - "format": "short", + "format": "s", "label": null, "logBase": 1, "max": null, @@ -5739,12 +5851,14 @@ "dashes": false, "datasource": "$datasource", "fill": 0, + "fillGradient": 0, "gridPos": { "h": 7, "w": 8, "x": 0, - "y": 54 + "y": 60 }, + "hiddenSeries": false, "id": 87, "legend": { "avg": false, @@ -5759,6 +5873,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -5769,9 +5886,10 @@ "steppedLine": false, "targets": [ { - "expr": "dbindex_num_active_compactions{instance=~\"$instance\"}", + "expr": "rate(index_block_compaction_plan_run_latency_count{instance=~\"$instance\"}[$step])", "format": "time_series", "intervalFactor": 1, + "legendFormat": "", "refId": "A" }, { @@ -5784,8 +5902,113 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Num Compactions", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "repeat": null, + "title": "Index Queue", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 46 + }, + "id": 108, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 68 + }, + "hiddenSeries": false, + "id": 85, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "database_tick_index_num_docs{instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Num Docs", "tooltip": { "shared": true, "sort": 0, @@ -5829,13 +6052,15 @@ "dashes": false, "datasource": "$datasource", "fill": 0, + "fillGradient": 0, "gridPos": { "h": 7, - "w": 8, - "x": 16, - "y": 54 + "w": 12, + "x": 12, + "y": 68 }, - "id": 88, + "hiddenSeries": false, + "id": 86, "legend": { "avg": false, "current": false, @@ -5849,6 +6074,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + 
"dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, @@ -5859,7 +6087,7 @@ "steppedLine": false, "targets": [ { - "expr": "dbindex_compaction_latency{instance=~\"$instance\"}", + "expr": "database_tick_index_num_segments{instance=~\"$instance\"}", "format": "time_series", "intervalFactor": 1, "refId": "A" @@ -5867,8 +6095,9 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, - "title": "Compaction Latency", + "title": "Num Segments", "tooltip": { "shared": true, "sort": 0, @@ -5884,7 +6113,7 @@ }, "yaxes": [ { - "format": "ms", + "format": "short", "label": null, "logBase": 1, "max": null, @@ -5912,12 +6141,14 @@ "dashes": false, "datasource": "$datasource", "fill": 0, + "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 0, - "y": 61 + "y": 75 }, + "hiddenSeries": false, "id": 74, "legend": { "avg": false, @@ -5932,6 +6163,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": true, @@ -5956,6 +6190,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Evicted/Sealed blocks", "tooltip": { @@ -6001,18 +6236,21 @@ } ], "refresh": false, - "schemaVersion": 16, + "schemaVersion": 22, "style": "dark", - "tags": ["disable-sync"], + "tags": [], "templating": { "list": [ { "current": { - "text": "M3Query - Prometheus", - "value": "M3Query - Prometheus" + "selected": false, + "text": "Prometheus", + "value": "Prometheus" }, "hide": 0, + "includeAll": false, "label": null, + "multi": false, "name": "datasource", "options": [], "query": "prometheus", @@ -6124,6 +6362,7 @@ ], "query": "series_pool,block_pool,encoder_pool,context_pool,iterator_pool,multi_iterator_pool,segment_reader_pool,bytes_pool,fetch_block_metadata_results_pool,fetch_blocks_metadata_results_pool,block_metadata_pool,block_metadata_slice_pool,blocks_metadata_pool,blocks_metadata_slice_pool,host_block_metadata_slice_pool,identifier_pool", "refresh": 0, + "skipUrlSync": false, "type": "custom" }, { @@ -6161,12 +6400,19 @@ } ], "query": "30s,1m,5m,10m", + "skipUrlSync": false, "type": "custom" }, { "allValue": null, - "current": {}, + "current": { + "text": "All", + "value": [ + "$__all" + ] + }, "datasource": "$datasource", + "definition": "", "hide": 0, "includeAll": true, "label": "instance", @@ -6176,6 +6422,7 @@ "query": "label_values(commitlog_writes_queued,instance)", "refresh": 2, "regex": "", + "skipUrlSync": false, "sort": 0, "tagValuesQuery": "", "tags": [], @@ -6203,10 +6450,19 @@ "2h", "1d" ], - "time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"] + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] }, "timezone": "browser", "title": "M3DB Node Details", "uid": "99SFck0iza", - "version": 4 -} + "version": 1 From 030d788699d3c4d5abf9e9f9cedd1b2f9cc8ea2a Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Sun, 21 Jun 2020 23:59:06 -0400 Subject: [PATCH 18/37] Add closing tag --- integrations/grafana/m3db_dashboard.json | 1 + 1 file changed, 1 insertion(+) diff --git a/integrations/grafana/m3db_dashboard.json b/integrations/grafana/m3db_dashboard.json index 135391e4e0..6bf240ce25 100644 --- a/integrations/grafana/m3db_dashboard.json +++ b/integrations/grafana/m3db_dashboard.json @@ -6466,3 +6466,4 @@ "title": "M3DB Node Details", "uid": "99SFck0iza", "version": 1 +} \ No newline at end of file From 2a7c95a2cb02cc92b285dd3e82ddff91983fc27f Mon Sep 17 00:00:00 2001 From: 
Rob Skillington Date: Tue, 30 Jun 2020 16:04:17 -0400 Subject: [PATCH 19/37] Gen mocks and fix build for verify_data_files --- src/cmd/tools/verify_data_files/main/main.go | 51 ++++--------------- .../tools/verify_data_files/main/main_test.go | 26 +++++++--- src/query/cost/cost_mock.go | 34 ++++++------- 3 files changed, 47 insertions(+), 64 deletions(-) diff --git a/src/cmd/tools/verify_data_files/main/main.go b/src/cmd/tools/verify_data_files/main/main.go index 7c65cc0b17..9ac107f650 100644 --- a/src/cmd/tools/verify_data_files/main/main.go +++ b/src/cmd/tools/verify_data_files/main/main.go @@ -381,11 +381,9 @@ func fixFileSet( }() var ( - currTags []ident.Tag - currTagsIter = ident.NewTagsIterator(ident.Tags{}) - removedIDs int - removedTags int - copies []checked.Bytes + removedIDs int + removedTags int + copies []checked.Bytes ) for { id, tags, data, checksum, err := reader.Read() @@ -396,39 +394,9 @@ func fixFileSet( return err } - // Need to save tags in case we need to write them out again - // (iterating them in read entry means we can't reiterate them - // without copying/duplicating). - currTags = currTags[:0] - for tags.Next() { - tag := tags.Current() - name := tag.Name.Bytes() - value := tag.Value.Bytes() - - // Need to take copy as only valid during iteration. - nameCopy := opts.bytesPool.Get(len(name)) - nameCopy.IncRef() - nameCopy.AppendAll(name) - valueCopy := opts.bytesPool.Get(len(value)) - valueCopy.IncRef() - valueCopy.AppendAll(value) - copies = append(copies, nameCopy) - copies = append(copies, valueCopy) - - currTags = append(currTags, ident.Tag{ - Name: ident.BytesID(nameCopy.Bytes()), - Value: ident.BytesID(valueCopy.Bytes()), - }) - } - - // Choose to write out the current tags if do not need modifying. - writeTags := currTags[:] + tagsCopy := tags.Duplicate() - var currIdentTags ident.Tags - currIdentTags.Reset(currTags) - currTagsIter.Reset(currIdentTags) - - check, err := readEntry(id, currTagsIter, data, checksum) + check, err := readEntry(id, tags, data, checksum) if err != nil { shouldFixInvalidID := check.invalidID && opts.fixInvalidIDs shouldFixInvalidTags := check.invalidTags && opts.fixInvalidTags @@ -447,11 +415,14 @@ func fixFileSet( return fmt.Errorf("encountered an error not enabled to fix: %v", err) } - var writeIdentTags ident.Tags - writeIdentTags.Reset(writeTags) + metadata := persist.NewMetadataFromIDAndTagIterator(id, tagsCopy, + persist.MetadataOptions{ + FinalizeID: true, + FinalizeTagIterator: true, + }) data.IncRef() - err = writer.Write(id, writeIdentTags, data, checksum) + err = writer.Write(metadata, data, checksum) data.DecRef() if err != nil { return fmt.Errorf("could not write fixed file set entry: %v", err) diff --git a/src/cmd/tools/verify_data_files/main/main_test.go b/src/cmd/tools/verify_data_files/main/main_test.go index fcc4a54947..b46592533a 100644 --- a/src/cmd/tools/verify_data_files/main/main_test.go +++ b/src/cmd/tools/verify_data_files/main/main_test.go @@ -34,7 +34,7 @@ import ( "github.com/m3db/m3/src/x/checked" "github.com/m3db/m3/src/x/ident" "github.com/m3db/m3/src/x/pool" - + "github.com/stretchr/testify/require" "go.uber.org/zap" ) @@ -120,12 +120,16 @@ func TestFixFileSetInvalidID(t *testing.T) { checksum := digest.Checksum(data.Bytes()) writer := testWriter.writer - err = writer.Write(id, ident.Tags{}, data, checksum) + metadata := persist.NewMetadataFromIDAndTags(id, ident.Tags{}, + persist.MetadataOptions{}) + err = writer.Write(metadata, data, checksum) require.NoError(t, err) // Write valid ID. 
id = ident.StringID("foo") - err = writer.Write(id, ident.Tags{}, data, checksum) + metadata = persist.NewMetadataFromIDAndTags(id, ident.Tags{}, + persist.MetadataOptions{}) + err = writer.Write(metadata, data, checksum) require.NoError(t, err) // Close volume. @@ -189,7 +193,9 @@ func TestFixFileSetInvalidTags(t *testing.T) { data.IncRef() checksum := digest.Checksum(data.Bytes()) - err = writer.Write(id, tags, data, checksum) + metadata := persist.NewMetadataFromIDAndTags(id, tags, + persist.MetadataOptions{}) + err = writer.Write(metadata, data, checksum) require.NoError(t, err) // Write valid tags. @@ -208,7 +214,9 @@ func TestFixFileSetInvalidTags(t *testing.T) { data.IncRef() checksum = digest.Checksum(data.Bytes()) - err = writer.Write(id, tags, data, checksum) + metadata = persist.NewMetadataFromIDAndTags(id, tags, + persist.MetadataOptions{}) + err = writer.Write(metadata, data, checksum) require.NoError(t, err) // Close volume. @@ -281,7 +289,9 @@ func TestFixFileSetInvalidChecksum(t *testing.T) { data.IncRef() checksum := digest.Checksum(data.Bytes()) + 1 - err = writer.Write(id, tags, data, checksum) + metadata := persist.NewMetadataFromIDAndTags(id, tags, + persist.MetadataOptions{}) + err = writer.Write(metadata, data, checksum) require.NoError(t, err) // Write valid checksum. @@ -300,7 +310,9 @@ func TestFixFileSetInvalidChecksum(t *testing.T) { data.IncRef() checksum = digest.Checksum(data.Bytes()) - err = writer.Write(id, tags, data, checksum) + metadata = persist.NewMetadataFromIDAndTags(id, tags, + persist.MetadataOptions{}) + err = writer.Write(metadata, data, checksum) require.NoError(t, err) // Close volume. diff --git a/src/query/cost/cost_mock.go b/src/query/cost/cost_mock.go index b0d92fae56..ecd1890b14 100644 --- a/src/query/cost/cost_mock.go +++ b/src/query/cost/cost_mock.go @@ -1,7 +1,7 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/m3db/m3/src/query/cost/go -// Copyright (c) 2019 Uber Technologies, Inc. +// Copyright (c) 2020 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -27,7 +27,7 @@ package cost import ( "reflect" - cost0 "github.com/m3db/m3/src/x/cost" + "github.com/m3db/m3/src/x/cost" "github.com/golang/mock/gomock" ) @@ -56,10 +56,10 @@ func (m *MockChainedEnforcer) EXPECT() *MockChainedEnforcerMockRecorder { } // Add mocks base method -func (m *MockChainedEnforcer) Add(op cost0.Cost) cost0.Report { +func (m *MockChainedEnforcer) Add(op Cost) Report { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Add", op) - ret0, _ := ret[0].(cost0.Report) + ret0, _ := ret[0].(Report) return ret0 } @@ -70,11 +70,11 @@ func (mr *MockChainedEnforcerMockRecorder) Add(op interface{}) *gomock.Call { } // State mocks base method -func (m *MockChainedEnforcer) State() (cost0.Report, cost0.Limit) { +func (m *MockChainedEnforcer) State() (Report, Limit) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "State") - ret0, _ := ret[0].(cost0.Report) - ret1, _ := ret[1].(cost0.Limit) + ret0, _ := ret[0].(Report) + ret1, _ := ret[1].(Limit) return ret0, ret1 } @@ -85,10 +85,10 @@ func (mr *MockChainedEnforcerMockRecorder) State() *gomock.Call { } // Limit mocks base method -func (m *MockChainedEnforcer) Limit() cost0.Limit { +func (m *MockChainedEnforcer) Limit() Limit { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Limit") - ret0, _ := ret[0].(cost0.Limit) + ret0, _ := ret[0].(Limit) return ret0 } @@ -99,10 +99,10 @@ func (mr *MockChainedEnforcerMockRecorder) Limit() *gomock.Call { } // Clone mocks base method -func (m *MockChainedEnforcer) Clone() cost0.Enforcer { +func (m *MockChainedEnforcer) Clone() Enforcer { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Clone") - ret0, _ := ret[0].(cost0.Enforcer) + ret0, _ := ret[0].(Enforcer) return ret0 } @@ -113,10 +113,10 @@ func (mr *MockChainedEnforcerMockRecorder) Clone() *gomock.Call { } // Reporter mocks base method -func (m *MockChainedEnforcer) Reporter() cost0.EnforcerReporter { +func (m *MockChainedEnforcer) Reporter() EnforcerReporter { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Reporter") - ret0, _ := ret[0].(cost0.EnforcerReporter) + ret0, _ := ret[0].(EnforcerReporter) return ret0 } @@ -176,7 +176,7 @@ func (m *MockChainedReporter) EXPECT() *MockChainedReporterMockRecorder { } // ReportCost mocks base method -func (m *MockChainedReporter) ReportCost(c cost0.Cost) { +func (m *MockChainedReporter) ReportCost(c Cost) { m.ctrl.T.Helper() m.ctrl.Call(m, "ReportCost", c) } @@ -188,7 +188,7 @@ func (mr *MockChainedReporterMockRecorder) ReportCost(c interface{}) *gomock.Cal } // ReportCurrent mocks base method -func (m *MockChainedReporter) ReportCurrent(c cost0.Cost) { +func (m *MockChainedReporter) ReportCurrent(c Cost) { m.ctrl.T.Helper() m.ctrl.Call(m, "ReportCurrent", c) } @@ -212,7 +212,7 @@ func (mr *MockChainedReporterMockRecorder) ReportOverLimit(enabled interface{}) } // OnChildClose mocks base method -func (m *MockChainedReporter) OnChildClose(currentCost cost0.Cost) { +func (m *MockChainedReporter) OnChildClose(currentCost Cost) { m.ctrl.T.Helper() m.ctrl.Call(m, "OnChildClose", currentCost) } @@ -224,7 +224,7 @@ func (mr *MockChainedReporterMockRecorder) OnChildClose(currentCost interface{}) } // OnClose mocks base method -func (m *MockChainedReporter) OnClose(currentCost cost0.Cost) { +func (m *MockChainedReporter) OnClose(currentCost Cost) { m.ctrl.T.Helper() m.ctrl.Call(m, "OnClose", currentCost) } From 1aa05778d70300831487e2688fddef15e1e360fb Mon Sep 17 00:00:00 2001 From: 
Rob Skillington Date: Tue, 30 Jun 2020 16:11:49 -0400 Subject: [PATCH 20/37] Fix query mock --- src/query/cost/cost_mock.go | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/query/cost/cost_mock.go b/src/query/cost/cost_mock.go index ecd1890b14..b0d92fae56 100644 --- a/src/query/cost/cost_mock.go +++ b/src/query/cost/cost_mock.go @@ -1,7 +1,7 @@ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/m3db/m3/src/query/cost/go -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2019 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -27,7 +27,7 @@ package cost import ( "reflect" - "github.com/m3db/m3/src/x/cost" + cost0 "github.com/m3db/m3/src/x/cost" "github.com/golang/mock/gomock" ) @@ -56,10 +56,10 @@ func (m *MockChainedEnforcer) EXPECT() *MockChainedEnforcerMockRecorder { } // Add mocks base method -func (m *MockChainedEnforcer) Add(op Cost) Report { +func (m *MockChainedEnforcer) Add(op cost0.Cost) cost0.Report { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Add", op) - ret0, _ := ret[0].(Report) + ret0, _ := ret[0].(cost0.Report) return ret0 } @@ -70,11 +70,11 @@ func (mr *MockChainedEnforcerMockRecorder) Add(op interface{}) *gomock.Call { } // State mocks base method -func (m *MockChainedEnforcer) State() (Report, Limit) { +func (m *MockChainedEnforcer) State() (cost0.Report, cost0.Limit) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "State") - ret0, _ := ret[0].(Report) - ret1, _ := ret[1].(Limit) + ret0, _ := ret[0].(cost0.Report) + ret1, _ := ret[1].(cost0.Limit) return ret0, ret1 } @@ -85,10 +85,10 @@ func (mr *MockChainedEnforcerMockRecorder) State() *gomock.Call { } // Limit mocks base method -func (m *MockChainedEnforcer) Limit() Limit { +func (m *MockChainedEnforcer) Limit() cost0.Limit { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Limit") - ret0, _ := ret[0].(Limit) + ret0, _ := ret[0].(cost0.Limit) return ret0 } @@ -99,10 +99,10 @@ func (mr *MockChainedEnforcerMockRecorder) Limit() *gomock.Call { } // Clone mocks base method -func (m *MockChainedEnforcer) Clone() Enforcer { +func (m *MockChainedEnforcer) Clone() cost0.Enforcer { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Clone") - ret0, _ := ret[0].(Enforcer) + ret0, _ := ret[0].(cost0.Enforcer) return ret0 } @@ -113,10 +113,10 @@ func (mr *MockChainedEnforcerMockRecorder) Clone() *gomock.Call { } // Reporter mocks base method -func (m *MockChainedEnforcer) Reporter() EnforcerReporter { +func (m *MockChainedEnforcer) Reporter() cost0.EnforcerReporter { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Reporter") - ret0, _ := ret[0].(EnforcerReporter) + ret0, _ := ret[0].(cost0.EnforcerReporter) return ret0 } @@ -176,7 +176,7 @@ func (m *MockChainedReporter) EXPECT() *MockChainedReporterMockRecorder { } // ReportCost mocks base method -func (m *MockChainedReporter) ReportCost(c Cost) { +func (m *MockChainedReporter) ReportCost(c cost0.Cost) { m.ctrl.T.Helper() m.ctrl.Call(m, "ReportCost", c) } @@ -188,7 +188,7 @@ func (mr *MockChainedReporterMockRecorder) ReportCost(c interface{}) *gomock.Cal } // ReportCurrent mocks base method -func (m *MockChainedReporter) ReportCurrent(c Cost) { +func (m *MockChainedReporter) ReportCurrent(c cost0.Cost) { m.ctrl.T.Helper() m.ctrl.Call(m, "ReportCurrent", c) } @@ -212,7 +212,7 @@ func (mr *MockChainedReporterMockRecorder) ReportOverLimit(enabled interface{}) } // OnChildClose mocks base method 
-func (m *MockChainedReporter) OnChildClose(currentCost Cost) { +func (m *MockChainedReporter) OnChildClose(currentCost cost0.Cost) { m.ctrl.T.Helper() m.ctrl.Call(m, "OnChildClose", currentCost) } @@ -224,7 +224,7 @@ func (mr *MockChainedReporterMockRecorder) OnChildClose(currentCost interface{}) } // OnClose mocks base method -func (m *MockChainedReporter) OnClose(currentCost Cost) { +func (m *MockChainedReporter) OnClose(currentCost cost0.Cost) { m.ctrl.T.Helper() m.ctrl.Call(m, "OnClose", currentCost) } From 18486cf4c291546e34804e3996f03e962025bb24 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Tue, 30 Jun 2020 16:32:38 -0400 Subject: [PATCH 21/37] Fix persist/fs package --- src/dbnode/persist/fs/merger_test.go | 16 ++--- src/dbnode/persist/fs/persist_manager_test.go | 40 +++++++---- src/dbnode/persist/fs/read_test.go | 29 +++++--- src/dbnode/persist/fs/read_write_test.go | 32 +++++---- src/dbnode/persist/fs/seek_test.go | 66 ++++++++++++++----- src/dbnode/persist/fs/write_test.go | 17 ++++- 6 files changed, 140 insertions(+), 60 deletions(-) diff --git a/src/dbnode/persist/fs/merger_test.go b/src/dbnode/persist/fs/merger_test.go index 4077bf0d8c..f8c7591e85 100644 --- a/src/dbnode/persist/fs/merger_test.go +++ b/src/dbnode/persist/fs/merger_test.go @@ -33,6 +33,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/block" "github.com/m3db/m3/src/dbnode/ts" "github.com/m3db/m3/src/dbnode/x/xio" + "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/x/checked" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" @@ -441,9 +442,9 @@ func testMergeWith( preparer := persist.NewMockFlushPreparer(ctrl) preparer.EXPECT().PrepareData(gomock.Any()).Return( persist.PreparedDataPersist{ - Persist: func(id ident.ID, tags ident.Tags, segment ts.Segment, checksum uint32) error { + Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error { persisted = append(persisted, persistedData{ - id: id, + metadata: metadata, // NB(bodu): Once data is persisted the `ts.Segment` gets finalized // so we can't read from it anymore or that violates the read after // free invariant. So we `Clone` the segment here. 
@@ -488,8 +489,8 @@ func assertPersistedAsExpected( require.Equal(t, expectedData.Len(), len(persisted)) for _, actualData := range persisted { - id := actualData.id - data, exists := expectedData.Get(id) + id := actualData.metadata.BytesID() + data, exists := expectedData.Get(ident.StringID(string(id))) require.True(t, exists) seg := ts.NewSegment(data, nil, 0, ts.FinalizeHead) @@ -531,7 +532,6 @@ func mockReaderFromData( ) *MockDataFileSetReader { reader := NewMockDataFileSetReader(ctrl) reader.EXPECT().Open(gomock.Any()).Return(nil) - reader.EXPECT().Entries().Return(diskData.Len()).Times(2) reader.EXPECT().Close().Return(nil) tagIter := ident.NewTagsIterator(ident.NewTags(ident.StringTag("tag-key0", "tag-val0"))) fakeChecksum := uint32(42) @@ -602,7 +602,7 @@ func mockMergeWithFromData( Start: startTime, Blocks: []xio.BlockReader{blockReaderFromData(data, segReader, startTime, blockSize)}, } - fn(id, ident.Tags{}, br) + fn(doc.Document{ID: id.Bytes()}, br) } } }) @@ -611,8 +611,8 @@ func mockMergeWithFromData( } type persistedData struct { - id ident.ID - segment ts.Segment + metadata persist.Metadata + segment ts.Segment } func datapointsFromSegment(t *testing.T, seg ts.Segment) []ts.Datapoint { diff --git a/src/dbnode/persist/fs/persist_manager_test.go b/src/dbnode/persist/fs/persist_manager_test.go index 0d68982960..b582680671 100644 --- a/src/dbnode/persist/fs/persist_manager_test.go +++ b/src/dbnode/persist/fs/persist_manager_test.go @@ -196,7 +196,9 @@ func TestPersistenceManagerPrepareSuccess(t *testing.T) { segment = ts.NewSegment(head, tail, 0, ts.FinalizeNone) checksum = segment.CalculateChecksum() ) - writer.EXPECT().WriteAll(id, tags, gomock.Any(), checksum).Return(nil) + metadata := persist.NewMetadataFromIDAndTags(id, tags, + persist.MetadataOptions{}) + writer.EXPECT().WriteAll(metadata, gomock.Any(), checksum).Return(nil) writer.EXPECT().Close() flush, err := pm.StartFlushPersist() @@ -221,7 +223,7 @@ func TestPersistenceManagerPrepareSuccess(t *testing.T) { require.Nil(t, err) - require.Nil(t, prepared.Persist(id, tags, segment, checksum)) + require.Nil(t, prepared.Persist(metadata, segment, checksum)) require.True(t, pm.start.Equal(now)) require.Equal(t, 124, pm.count) @@ -266,7 +268,9 @@ func TestPersistenceManagerPrepareSnapshotSuccess(t *testing.T) { segment = ts.NewSegment(head, tail, 0, ts.FinalizeNone) checksum = segment.CalculateChecksum() ) - writer.EXPECT().WriteAll(id, tags, gomock.Any(), checksum).Return(nil) + metadata := persist.NewMetadataFromIDAndTags(id, tags, + persist.MetadataOptions{}) + writer.EXPECT().WriteAll(metadata, gomock.Any(), checksum).Return(nil) writer.EXPECT().Close() flush, err := pm.StartSnapshotPersist(testSnapshotID) @@ -291,7 +295,7 @@ func TestPersistenceManagerPrepareSnapshotSuccess(t *testing.T) { require.Nil(t, err) - require.Nil(t, prepared.Persist(id, tags, segment, checksum)) + require.Nil(t, prepared.Persist(metadata, segment, checksum)) require.True(t, pm.start.Equal(now)) require.Equal(t, 124, pm.count) @@ -504,7 +508,12 @@ func TestPersistenceManagerNoRateLimit(t *testing.T) { pm.nowFn = func() time.Time { return now } pm.sleepFn = func(d time.Duration) { slept += d } - writer.EXPECT().WriteAll(id, tags, pm.dataPM.segmentHolder, checksum).Return(nil).Times(2) + metadata := persist.NewMetadataFromIDAndTags(id, tags, + persist.MetadataOptions{}) + writer.EXPECT(). + WriteAll(metadata, pm.dataPM.segmentHolder, checksum). + Return(nil). 
+ Times(2) flush, err := pm.StartFlushPersist() require.NoError(t, err) @@ -524,11 +533,11 @@ func TestPersistenceManagerNoRateLimit(t *testing.T) { // Start persistence now = time.Now() - require.NoError(t, prepared.Persist(id, tags, segment, checksum)) + require.NoError(t, prepared.Persist(metadata, segment, checksum)) // Advance time and write again now = now.Add(time.Millisecond) - require.NoError(t, prepared.Persist(id, tags, segment, checksum)) + require.NoError(t, prepared.Persist(metadata, segment, checksum)) // Check there is no rate limiting require.Equal(t, time.Duration(0), slept) @@ -567,8 +576,13 @@ func TestPersistenceManagerWithRateLimit(t *testing.T) { }, BlockSize: testBlockSize, }, m3test.IdentTransformer) + metadata := persist.NewMetadataFromIDAndTags(id, ident.Tags{}, + persist.MetadataOptions{}) writer.EXPECT().Open(writerOpts).Return(nil).Times(iter) - writer.EXPECT().WriteAll(id, ident.Tags{}, pm.dataPM.segmentHolder, checksum).Return(nil).AnyTimes() + writer.EXPECT(). + WriteAll(metadata, pm.dataPM.segmentHolder, checksum). + Return(nil). + AnyTimes() writer.EXPECT().Close().Times(iter) // Enable rate limiting @@ -607,21 +621,21 @@ func TestPersistenceManagerWithRateLimit(t *testing.T) { // Start persistence now = time.Now() - require.NoError(t, prepared.Persist(id, ident.Tags{}, segment, checksum)) + require.NoError(t, prepared.Persist(metadata, segment, checksum)) // Assert we don't rate limit if the count is not enough yet - require.NoError(t, prepared.Persist(id, ident.Tags{}, segment, checksum)) + require.NoError(t, prepared.Persist(metadata, segment, checksum)) require.Equal(t, time.Duration(0), slept) // Advance time and check we rate limit if the disk throughput exceeds the limit now = now.Add(time.Microsecond) - require.NoError(t, prepared.Persist(id, ident.Tags{}, segment, checksum)) + require.NoError(t, prepared.Persist(metadata, segment, checksum)) require.Equal(t, time.Duration(1861), slept) // Advance time and check we don't rate limit if the disk throughput is below the limit - require.NoError(t, prepared.Persist(id, ident.Tags{}, segment, checksum)) + require.NoError(t, prepared.Persist(metadata, segment, checksum)) now = now.Add(time.Second - time.Microsecond) - require.NoError(t, prepared.Persist(id, ident.Tags{}, segment, checksum)) + require.NoError(t, prepared.Persist(metadata, segment, checksum)) require.Equal(t, time.Duration(1861), slept) require.Equal(t, int64(15), pm.bytesWritten) diff --git a/src/dbnode/persist/fs/read_test.go b/src/dbnode/persist/fs/read_test.go index 073d7f4187..5f6c3ca6aa 100644 --- a/src/dbnode/persist/fs/read_test.go +++ b/src/dbnode/persist/fs/read_test.go @@ -31,6 +31,7 @@ import ( "time" "github.com/m3db/m3/src/dbnode/digest" + "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/persist/fs/msgpack" "github.com/m3db/m3/src/dbnode/persist/schema" "github.com/m3db/m3/src/x/checked" @@ -168,10 +169,13 @@ func TestReadDataError(t *testing.T) { BlockStart: testWriterStart, }, } + metadata := persist.NewMetadataFromIDAndTags( + ident.StringID("foo"), + ident.Tags{}, + persist.MetadataOptions{}) err = w.Open(writerOpts) require.NoError(t, err) - require.NoError(t, w.Write( - ident.StringID("foo"), ident.Tags{}, + require.NoError(t, w.Write(metadata, bytesRefd([]byte{1, 2, 3}), digest.Checksum([]byte{1, 2, 3}))) require.NoError(t, w.Close()) @@ -221,12 +225,15 @@ func TestReadDataUnexpectedSize(t *testing.T) { BlockStart: testWriterStart, }, } + metadata := persist.NewMetadataFromIDAndTags( + 
ident.StringID("foo"), + ident.Tags{}, + persist.MetadataOptions{}) err = w.Open(writerOpts) assert.NoError(t, err) dataFile := w.(*writer).dataFdWithDigest.Fd().Name() - assert.NoError(t, w.Write( - ident.StringID("foo"), ident.Tags{}, + assert.NoError(t, w.Write(metadata, bytesRefd([]byte{1, 2, 3}), digest.Checksum([]byte{1, 2, 3}))) assert.NoError(t, w.Close()) @@ -308,10 +315,13 @@ func testReadOpen(t *testing.T, fileData map[string][]byte) { BlockStart: start, }, } + metadata := persist.NewMetadataFromIDAndTags( + ident.StringID("foo"), + ident.Tags{}, + persist.MetadataOptions{}) assert.NoError(t, w.Open(writerOpts)) - assert.NoError(t, w.Write( - ident.StringID("foo"), ident.Tags{}, + assert.NoError(t, w.Write(metadata, bytesRefd([]byte{0x1}), digest.Checksum([]byte{0x1}))) assert.NoError(t, w.Close()) @@ -401,10 +411,13 @@ func TestReadValidate(t *testing.T) { BlockStart: start, }, } + metadata := persist.NewMetadataFromIDAndTags( + ident.StringID("foo"), + ident.Tags{}, + persist.MetadataOptions{}) require.NoError(t, w.Open(writerOpts)) - assert.NoError(t, w.Write( - ident.StringID("foo"), ident.Tags{}, + assert.NoError(t, w.Write(metadata, bytesRefd([]byte{0x1}), digest.Checksum([]byte{0x1}))) require.NoError(t, w.Close()) diff --git a/src/dbnode/persist/fs/read_write_test.go b/src/dbnode/persist/fs/read_write_test.go index edc10276be..521f9df19f 100644 --- a/src/dbnode/persist/fs/read_write_test.go +++ b/src/dbnode/persist/fs/read_write_test.go @@ -124,9 +124,10 @@ func writeTestDataWithVolume( assert.NoError(t, err) for i := range entries { - assert.NoError(t, w.Write( - entries[i].ID(), + metadata := persist.NewMetadataFromIDAndTags(entries[i].ID(), entries[i].Tags(), + persist.MetadataOptions{}) + assert.NoError(t, w.Write(metadata, bytesRefd(entries[i].data), digest.Checksum(entries[i].data))) } @@ -141,8 +142,7 @@ func writeTestDataWithVolume( // Check every entry has ID and Tags nil for _, elem := range slice { - assert.Nil(t, elem.id) - assert.Nil(t, elem.tags.Values()) + assert.Equal(t, persist.Metadata{}, elem.metadata) } } @@ -301,9 +301,10 @@ func TestDuplicateWrite(t *testing.T) { require.NoError(t, err) for i := range entries { - require.NoError(t, w.Write( - entries[i].ID(), + metadata := persist.NewMetadataFromIDAndTags(entries[i].ID(), entries[i].Tags(), + persist.MetadataOptions{}) + require.NoError(t, w.Write(metadata, bytesRefd(entries[i].data), digest.Checksum(entries[i].data))) } @@ -457,19 +458,21 @@ func TestReusingWriterAfterWriteError(t *testing.T) { BlockStart: testWriterStart, }, } + metadata := persist.NewMetadataFromIDAndTags(entries[0].ID(), + entries[0].Tags(), + persist.MetadataOptions{}) require.NoError(t, w.Open(writerOpts)) - require.NoError(t, w.Write( - entries[0].ID(), - entries[0].Tags(), + require.NoError(t, w.Write(metadata, bytesRefd(entries[0].data), digest.Checksum(entries[0].data))) // Intentionally force a writer error. 
w.(*writer).err = errors.New("foo") - require.Equal(t, "foo", w.Write( - entries[1].ID(), + metadata = persist.NewMetadataFromIDAndTags(entries[1].ID(), entries[1].Tags(), + persist.MetadataOptions{}) + require.Equal(t, "foo", w.Write(metadata, bytesRefd(entries[1].data), digest.Checksum(entries[1].data)).Error()) w.Close() @@ -503,15 +506,20 @@ func TestWriterOnlyWritesNonNilBytes(t *testing.T) { BlockStart: testWriterStart, }, } + metadata := persist.NewMetadataFromIDAndTags( + ident.StringID("foo"), + ident.Tags{}, + persist.MetadataOptions{}) require.NoError(t, w.Open(writerOpts)) - w.WriteAll(ident.StringID("foo"), ident.Tags{}, + err := w.WriteAll(metadata, []checked.Bytes{ checkedBytes([]byte{1, 2, 3}), nil, checkedBytes([]byte{4, 5, 6}), }, digest.Checksum([]byte{1, 2, 3, 4, 5, 6})) + require.NoError(t, err) assert.NoError(t, w.Close()) diff --git a/src/dbnode/persist/fs/seek_test.go b/src/dbnode/persist/fs/seek_test.go index e57962e00f..6b9fa9db62 100644 --- a/src/dbnode/persist/fs/seek_test.go +++ b/src/dbnode/persist/fs/seek_test.go @@ -29,6 +29,7 @@ import ( "time" "github.com/m3db/m3/src/dbnode/digest" + "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/x/ident" "github.com/stretchr/testify/assert" @@ -89,12 +90,15 @@ func TestSeekDataUnexpectedSize(t *testing.T) { BlockStart: testWriterStart, }, } + metadata := persist.NewMetadataFromIDAndTags( + ident.StringID("foo"), + ident.Tags{}, + persist.MetadataOptions{}) err = w.Open(writerOpts) assert.NoError(t, err) dataFile := w.(*writer).dataFdWithDigest.Fd().Name() - assert.NoError(t, w.Write( - ident.StringID("foo"), ident.Tags{}, + assert.NoError(t, w.Write(metadata, bytesRefd([]byte{1, 2, 3}), digest.Checksum([]byte{1, 2, 3}))) assert.NoError(t, w.Close()) @@ -136,7 +140,10 @@ func TestSeekBadChecksum(t *testing.T) { // Write data with wrong checksum assert.NoError(t, w.Write( - ident.StringID("foo"), ident.Tags{}, + persist.NewMetadataFromIDAndTags( + ident.StringID("foo"), + ident.Tags{}, + persist.MetadataOptions{}), bytesRefd([]byte{1, 2, 3}), digest.Checksum([]byte{1, 2, 4}))) assert.NoError(t, w.Close()) @@ -175,18 +182,24 @@ func TestSeek(t *testing.T) { err = w.Open(writerOpts) assert.NoError(t, err) assert.NoError(t, w.Write( - ident.StringID("foo1"), - ident.NewTags(ident.StringTag("num", "1")), + persist.NewMetadataFromIDAndTags( + ident.StringID("foo1"), + ident.NewTags(ident.StringTag("num", "1")), + persist.MetadataOptions{}), bytesRefd([]byte{1, 2, 1}), digest.Checksum([]byte{1, 2, 1}))) assert.NoError(t, w.Write( - ident.StringID("foo2"), - ident.NewTags(ident.StringTag("num", "2")), + persist.NewMetadataFromIDAndTags( + ident.StringID("foo2"), + ident.NewTags(ident.StringTag("num", "2")), + persist.MetadataOptions{}), bytesRefd([]byte{1, 2, 2}), digest.Checksum([]byte{1, 2, 2}))) assert.NoError(t, w.Write( - ident.StringID("foo3"), - ident.NewTags(ident.StringTag("num", "3")), + persist.NewMetadataFromIDAndTags( + ident.StringID("foo3"), + ident.NewTags(ident.StringTag("num", "3")), + persist.MetadataOptions{}), bytesRefd([]byte{1, 2, 3}), digest.Checksum([]byte{1, 2, 3}))) assert.NoError(t, w.Close()) @@ -246,15 +259,24 @@ func TestSeekIDNotExists(t *testing.T) { err = w.Open(writerOpts) assert.NoError(t, err) assert.NoError(t, w.Write( - ident.StringID("foo10"), ident.Tags{}, + persist.NewMetadataFromIDAndTags( + ident.StringID("foo10"), + ident.Tags{}, + persist.MetadataOptions{}), bytesRefd([]byte{1, 2, 1}), digest.Checksum([]byte{1, 2, 1}))) assert.NoError(t, w.Write( - 
ident.StringID("foo20"), ident.Tags{}, + persist.NewMetadataFromIDAndTags( + ident.StringID("foo20"), + ident.Tags{}, + persist.MetadataOptions{}), bytesRefd([]byte{1, 2, 2}), digest.Checksum([]byte{1, 2, 2}))) assert.NoError(t, w.Write( - ident.StringID("foo30"), ident.Tags{}, + persist.NewMetadataFromIDAndTags( + ident.StringID("foo30"), + ident.Tags{}, + persist.MetadataOptions{}), bytesRefd([]byte{1, 2, 3}), digest.Checksum([]byte{1, 2, 3}))) assert.NoError(t, w.Close()) @@ -300,7 +322,10 @@ func TestReuseSeeker(t *testing.T) { err = w.Open(writerOpts) assert.NoError(t, err) assert.NoError(t, w.Write( - ident.StringID("foo"), ident.Tags{}, + persist.NewMetadataFromIDAndTags( + ident.StringID("foo"), + ident.Tags{}, + persist.MetadataOptions{}), bytesRefd([]byte{1, 2, 1}), digest.Checksum([]byte{1, 2, 1}))) assert.NoError(t, w.Close()) @@ -316,7 +341,10 @@ func TestReuseSeeker(t *testing.T) { err = w.Open(writerOpts) assert.NoError(t, err) assert.NoError(t, w.Write( - ident.StringID("foo"), ident.Tags{}, + persist.NewMetadataFromIDAndTags( + ident.StringID("foo"), + ident.Tags{}, + persist.MetadataOptions{}), bytesRefd([]byte{1, 2, 3}), digest.Checksum([]byte{1, 2, 3}))) assert.NoError(t, w.Close()) @@ -365,7 +393,10 @@ func TestCloneSeeker(t *testing.T) { err = w.Open(writerOpts) assert.NoError(t, err) assert.NoError(t, w.Write( - ident.StringID("foo"), ident.Tags{}, + persist.NewMetadataFromIDAndTags( + ident.StringID("foo"), + ident.Tags{}, + persist.MetadataOptions{}), bytesRefd([]byte{1, 2, 1}), digest.Checksum([]byte{1, 2, 1}))) assert.NoError(t, w.Close()) @@ -381,7 +412,10 @@ func TestCloneSeeker(t *testing.T) { err = w.Open(writerOpts) assert.NoError(t, err) assert.NoError(t, w.Write( - ident.StringID("foo"), ident.Tags{}, + persist.NewMetadataFromIDAndTags( + ident.StringID("foo"), + ident.Tags{}, + persist.MetadataOptions{}), bytesRefd([]byte{1, 2, 3}), digest.Checksum([]byte{1, 2, 3}))) assert.NoError(t, w.Close()) diff --git a/src/dbnode/persist/fs/write_test.go b/src/dbnode/persist/fs/write_test.go index 2c8d718f1b..57ca954db8 100644 --- a/src/dbnode/persist/fs/write_test.go +++ b/src/dbnode/persist/fs/write_test.go @@ -43,7 +43,6 @@ func TestWriteReuseAfterError(t *testing.T) { filePathPrefix := filepath.Join(dir, "") defer os.RemoveAll(dir) - seriesID := ident.StringID("series1") w := newTestWriter(t, filePathPrefix) writerOpts := DataWriterOpenOptions{ Identifier: FileSetFileIdentifier{ @@ -58,8 +57,20 @@ func TestWriteReuseAfterError(t *testing.T) { data := checkedBytes([]byte{1, 2, 3}) require.NoError(t, w.Open(writerOpts)) - require.NoError(t, w.Write(seriesID, ident.Tags{}, data, 0)) - require.NoError(t, w.Write(seriesID, ident.Tags{}, data, 0)) + require.NoError(t, w.Write( + persist.NewMetadataFromIDAndTags( + ident.StringID("series1"), + ident.Tags{}, + persist.MetadataOptions{}), + data, + 0)) + require.NoError(t, w.Write( + persist.NewMetadataFromIDAndTags( + ident.StringID("series1"), + ident.Tags{}, + persist.MetadataOptions{}), + data, + 0)) require.Error(t, w.Close()) require.NoError(t, w.Open(writerOpts)) From 8698f185339960b873d86760196bbf4760cdfdc0 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Tue, 30 Jun 2020 16:38:53 -0400 Subject: [PATCH 22/37] Fix peers test --- src/dbnode/persist/fs/write.go | 3 +- .../bootstrapper/peers/source_data_test.go | 30 +++++++++---------- .../bootstrapper/peers/source_index_test.go | 8 +++-- 3 files changed, 22 insertions(+), 19 deletions(-) diff --git a/src/dbnode/persist/fs/write.go 
b/src/dbnode/persist/fs/write.go index be0ea86254..3af518c812 100644 --- a/src/dbnode/persist/fs/write.go +++ b/src/dbnode/persist/fs/write.go @@ -29,14 +29,13 @@ import ( "sort" "time" - "github.com/m3db/m3/src/x/ident" - "github.com/m3db/bloom" "github.com/m3db/m3/src/dbnode/digest" "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/persist/fs/msgpack" "github.com/m3db/m3/src/dbnode/persist/schema" "github.com/m3db/m3/src/x/checked" + "github.com/m3db/m3/src/x/ident" "github.com/m3db/m3/src/x/serialize" xtime "github.com/m3db/m3/src/x/time" diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go index e00a99b1e7..8675d5d49f 100644 --- a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go +++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go @@ -337,9 +337,9 @@ func TestPeersSourceRunWithPersist(t *testing.T) { flushPreparer.EXPECT(). PrepareData(prepareOpts). Return(persist.PreparedDataPersist{ - Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error { + Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error { persists["foo"]++ - assert.Equal(t, "foo", id.String()) + assert.Equal(t, "foo", string(metadata.BytesID())) assert.Equal(t, []byte{1, 2, 3}, segment.Head.Bytes()) assertBlockChecksum(t, checksum, fooBlock) return nil @@ -358,9 +358,9 @@ func TestPeersSourceRunWithPersist(t *testing.T) { flushPreparer.EXPECT(). PrepareData(prepareOpts). Return(persist.PreparedDataPersist{ - Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error { + Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error { persists["bar"]++ - assert.Equal(t, "bar", id.String()) + assert.Equal(t, "bar", string(metadata.BytesID())) assert.Equal(t, []byte{4, 5, 6}, segment.Head.Bytes()) assertBlockChecksum(t, checksum, barBlock) return nil @@ -379,9 +379,9 @@ func TestPeersSourceRunWithPersist(t *testing.T) { flushPreparer.EXPECT(). PrepareData(prepareOpts). Return(persist.PreparedDataPersist{ - Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error { + Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error { persists["baz"]++ - assert.Equal(t, "baz", id.String()) + assert.Equal(t, "baz", string(metadata.BytesID())) assert.Equal(t, []byte{7, 8, 9}, segment.Head.Bytes()) assertBlockChecksum(t, checksum, bazBlock) return nil @@ -400,7 +400,7 @@ func TestPeersSourceRunWithPersist(t *testing.T) { flushPreparer.EXPECT(). PrepareData(prepareOpts). Return(persist.PreparedDataPersist{ - Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error { + Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error { assert.Fail(t, "no expected shard 1 second block") return nil }, @@ -578,7 +578,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) { flushPreprarer.EXPECT(). PrepareData(prepareOpts). Return(persist.PreparedDataPersist{ - Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error { + Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error { assert.Fail(t, "not expecting to flush shard 0 at start") return nil }, @@ -596,7 +596,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) { flushPreprarer.EXPECT(). PrepareData(prepareOpts). 
Return(persist.PreparedDataPersist{ - Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error { + Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error { persists["foo"]++ return nil }, @@ -616,7 +616,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) { flushPreprarer.EXPECT(). PrepareData(prepareOpts). Return(persist.PreparedDataPersist{ - Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error { + Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error { assert.Fail(t, "not expecting to flush shard 0 at start + block size") return nil }, @@ -634,7 +634,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) { flushPreprarer.EXPECT(). PrepareData(prepareOpts). Return(persist.PreparedDataPersist{ - Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error { + Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error { persists["bar"]++ return nil }, @@ -654,7 +654,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) { flushPreprarer.EXPECT(). PrepareData(prepareOpts). Return(persist.PreparedDataPersist{ - Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error { + Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error { persists["baz"]++ return fmt.Errorf("a persist error") }, @@ -672,7 +672,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) { flushPreprarer.EXPECT(). PrepareData(prepareOpts). Return(persist.PreparedDataPersist{ - Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error { + Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error { persists["baz"]++ return nil }, @@ -692,7 +692,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) { flushPreprarer.EXPECT(). PrepareData(prepareOpts). Return(persist.PreparedDataPersist{ - Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error { + Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error { persists["qux"]++ return nil }, @@ -710,7 +710,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) { flushPreprarer.EXPECT(). PrepareData(prepareOpts). 
Return(persist.PreparedDataPersist{ - Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error { + Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error { persists["qux"]++ return nil }, diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_index_test.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_index_test.go index 68c15c8740..17c1cc51b9 100644 --- a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_index_test.go +++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_index_test.go @@ -31,6 +31,7 @@ import ( "github.com/m3db/m3/src/dbnode/client" "github.com/m3db/m3/src/dbnode/digest" "github.com/m3db/m3/src/dbnode/namespace" + "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/persist/fs" "github.com/m3db/m3/src/dbnode/retention" "github.com/m3db/m3/src/dbnode/storage/block" @@ -122,8 +123,11 @@ func writeTSDBFiles( for _, v := range series { bytes := checked.NewBytes(v.data, nil) bytes.IncRef() - require.NoError(t, w.Write(ident.StringID(v.id), - sortedTagsFromTagsMap(v.tags), bytes, digest.Checksum(bytes.Bytes()))) + metadata := persist.NewMetadataFromIDAndTags(ident.StringID(v.id), + sortedTagsFromTagsMap(v.tags), + persist.MetadataOptions{}) + require.NoError(t, w.Write(metadata, bytes, + digest.Checksum(bytes.Bytes()))) bytes.DecRef() } From 8e9a2bd4915f275caf8ba6db0d506b20f9a692ad Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Tue, 30 Jun 2020 16:43:34 -0400 Subject: [PATCH 23/37] Fix ts/writes package --- src/dbnode/ts/writes/write_batch_test.go | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/src/dbnode/ts/writes/write_batch_test.go b/src/dbnode/ts/writes/write_batch_test.go index b3ea6d0e90..e956cef5ef 100644 --- a/src/dbnode/ts/writes/write_batch_test.go +++ b/src/dbnode/ts/writes/write_batch_test.go @@ -184,14 +184,14 @@ func TestBatchWriterSetSeries(t *testing.T) { ) newSeries.ID = ident.StringID(fmt.Sprint(i)) - var err error if i == len(iter)-1 { // Set skip for this to true; it should revert to not skipping after // SetOutcome called below. 
- err = errors.New("some-error") - writeBatch.SetSkipWrite(i) + err := errors.New("some-error") + writeBatch.SetError(i, err) + } else { + writeBatch.SetSeries(i, newSeries) } - writeBatch.SetOutcome(i, newSeries, err) } iter = writeBatch.Iter() @@ -205,14 +205,17 @@ func TestBatchWriterSetSeries(t *testing.T) { currSeries = currWrite.Series i = j + 1 ) - require.Equal(t, fmt.Sprint(i), string(currSeries.ID.String())) - require.True(t, ident.StringID(fmt.Sprint(i)).Equal(currSeries.ID)) - require.False(t, curr.SkipWrite) if i == len(iter)-1 { require.Equal(t, errors.New("some-error"), curr.Err) - } else { - require.NoError(t, curr.Err) + require.True(t, curr.SkipWrite) + continue } + + require.Equal(t, fmt.Sprint(i), string(currSeries.ID.String())) + require.True(t, ident.StringID(fmt.Sprint(i)).Equal(currSeries.ID)) + require.False(t, curr.SkipWrite) + + require.NoError(t, curr.Err) } } From e287b8f3379b047dbedf9d908e5a7727815253c1 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Tue, 30 Jun 2020 17:02:50 -0400 Subject: [PATCH 24/37] Fix storage tests --- .../storage/index_queue_forward_write_test.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/dbnode/storage/index_queue_forward_write_test.go b/src/dbnode/storage/index_queue_forward_write_test.go index ec406fbafa..e823c14876 100644 --- a/src/dbnode/storage/index_queue_forward_write_test.go +++ b/src/dbnode/storage/index_queue_forward_write_test.go @@ -31,6 +31,7 @@ import ( "github.com/m3db/m3/src/dbnode/runtime" "github.com/m3db/m3/src/dbnode/storage/index" "github.com/m3db/m3/src/dbnode/storage/series" + "github.com/m3db/m3/src/dbnode/ts/writes" xmetrics "github.com/m3db/m3/src/dbnode/x/metrics" "github.com/m3db/m3/src/m3ninx/doc" m3ninxidx "github.com/m3db/m3/src/m3ninx/idx" @@ -397,6 +398,7 @@ func writeToShard( ctx context.Context, t *testing.T, shard *dbShard, + idx NamespaceIndex, now time.Time, id string, shouldWrite bool, @@ -414,6 +416,12 @@ func writeToShard( }) require.NoError(t, err) require.Equal(t, shouldWrite, seriesWrite.WasWritten) + if seriesWrite.NeedsIndex { + err = idx.WritePending([]writes.PendingIndexInsert{ + seriesWrite.PendingIndexInsert, + }) + require.NoError(t, err) + } } func verifyShard( @@ -471,7 +479,7 @@ func writeToShardAndVerify( id string, shouldWrite bool, ) { - writeToShard(ctx, t, shard, now, id, shouldWrite) + writeToShard(ctx, t, shard, idx, now, id, shouldWrite) verifyShard(ctx, t, idx, now, next, id) } @@ -546,9 +554,9 @@ func testShardForwardWriteTaggedAsyncRefCount( ctx := context.NewContext() defer ctx.Close() - writeToShard(ctx, t, shard, now, "foo", true) - writeToShard(ctx, t, shard, now, "bar", true) - writeToShard(ctx, t, shard, now, "baz", true) + writeToShard(ctx, t, shard, idx, now, "foo", true) + writeToShard(ctx, t, shard, idx, now, "bar", true) + writeToShard(ctx, t, shard, idx, now, "baz", true) verifyShard(ctx, t, idx, now, next, "foo") verifyShard(ctx, t, idx, now, next, "bar") From 0affb08f0ae65f68ffd297707fbe0794f1c04c10 Mon Sep 17 00:00:00 2001 From: Bo Du Date: Tue, 30 Jun 2020 20:53:53 -0400 Subject: [PATCH 25/37] Fix tests. 
--- .../persist/fs/index_lookup_prop_test.go | 2 +- src/dbnode/persist/fs/retriever_test.go | 13 +- .../commitlog/source_index_test.go | 202 +----------------- .../storage/index_query_concurrent_test.go | 2 +- 4 files changed, 17 insertions(+), 202 deletions(-) diff --git a/src/dbnode/persist/fs/index_lookup_prop_test.go b/src/dbnode/persist/fs/index_lookup_prop_test.go index f8f3ba7bf5..8daabd66bd 100644 --- a/src/dbnode/persist/fs/index_lookup_prop_test.go +++ b/src/dbnode/persist/fs/index_lookup_prop_test.go @@ -165,7 +165,7 @@ func calculateExpectedChecksum(t *testing.T, filePath string) uint32 { func writeTestSummariesData(w DataFileSetWriter, writes []generatedWrite) error { for _, write := range writes { - metadata := persist.NewMetadataSeriesIDAndTags(write.id, write.tags, + metadata := persist.NewMetadataFromIDAndTags(write.id, write.tags, persist.MetadataOptions{}) err := w.Write(metadata, write.data, write.checksum) if err != nil { diff --git a/src/dbnode/persist/fs/retriever_test.go b/src/dbnode/persist/fs/retriever_test.go index 4d26192f42..b470e2af7f 100644 --- a/src/dbnode/persist/fs/retriever_test.go +++ b/src/dbnode/persist/fs/retriever_test.go @@ -36,6 +36,7 @@ import ( "github.com/m3db/m3/src/cluster/shard" "github.com/m3db/m3/src/dbnode/digest" + "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/sharding" "github.com/m3db/m3/src/dbnode/storage/block" "github.com/m3db/m3/src/dbnode/storage/index/convert" @@ -299,7 +300,9 @@ func testBlockRetrieverHighConcurrentSeeks(t *testing.T, shouldCacheShardIndices } tags := testTagsFromIDAndVolume(id.String(), volume) - err := w.Write(id, tags, data, digest.Checksum(data.Bytes())) + metadata := persist.NewMetadataFromIDAndTags(id, tags, + persist.MetadataOptions{}) + err := w.Write(metadata, data, digest.Checksum(data.Bytes())) require.NoError(t, err) } closer() @@ -558,7 +561,9 @@ func TestBlockRetrieverIDDoesNotExist(t *testing.T) { data := checked.NewBytes([]byte("Hello world!"), nil) data.IncRef() defer data.DecRef() - err = w.Write(ident.StringID("exists"), ident.Tags{}, data, digest.Checksum(data.Bytes())) + metadata := persist.NewMetadataFromIDAndTags(ident.StringID("exists"), ident.Tags{}, + persist.MetadataOptions{}) + err = w.Write(metadata, data, digest.Checksum(data.Bytes())) assert.NoError(t, err) closer() @@ -626,7 +631,9 @@ func TestBlockRetrieverOnlyCreatesTagItersIfTagsExists(t *testing.T) { data.IncRef() defer data.DecRef() - err = w.Write(ident.StringID(write.id), write.tags, data, digest.Checksum(data.Bytes())) + metadata := persist.NewMetadataFromIDAndTags(ident.StringID(write.id), write.tags, + persist.MetadataOptions{}) + err = w.Write(metadata, data, digest.Checksum(data.Bytes())) require.NoError(t, err) } closer() diff --git a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_index_test.go b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_index_test.go index eaa5e62511..eb0bf14de2 100644 --- a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_index_test.go +++ b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_index_test.go @@ -21,7 +21,6 @@ package commitlog import ( - "fmt" "testing" "time" @@ -31,8 +30,6 @@ import ( "github.com/m3db/m3/src/dbnode/storage/bootstrap" "github.com/m3db/m3/src/dbnode/storage/bootstrap/result" "github.com/m3db/m3/src/dbnode/ts" - idxpersist "github.com/m3db/m3/src/m3ninx/persist" - "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" "github.com/m3db/m3/src/x/pool" "github.com/m3db/m3/src/x/serialize" 
@@ -114,20 +111,20 @@ func TestBootstrapIndex(t *testing.T) { ID: ident.StringID("baz"), EncodedTags: bazTags} // Make sure we can handle series that don't have tags. untagged := ts.Series{UniqueIndex: 3, Namespace: testNamespaceID, - Shard: shardn(5), ID: ident.StringID("untagged"), Tags: ident.Tags{}} + Shard: shardn(5), ID: ident.StringID("untagged")} // Make sure we skip series that are not within the bootstrap range. outOfRange := ts.Series{UniqueIndex: 4, Namespace: testNamespaceID, - Shard: shardn(3), ID: ident.StringID("outOfRange"), Tags: ident.Tags{}} + Shard: shardn(3), ID: ident.StringID("outOfRange")} // Make sure we skip and dont panic on writes for shards that are higher than the maximum we're trying to bootstrap. shardTooHigh := ts.Series{UniqueIndex: 5, Namespace: testNamespaceID, - Shard: shardn(100), ID: ident.StringID("shardTooHigh"), Tags: ident.Tags{}} + Shard: shardn(100), ID: ident.StringID("shardTooHigh")} // Make sure we skip series for shards that have no requested bootstrap ranges. The shard for this write needs // to be less than the highest shard we actually plan to bootstrap. noShardBootstrapRange := ts.Series{UniqueIndex: 6, Namespace: testNamespaceID, - Shard: shardn(4), ID: ident.StringID("noShardBootstrapRange"), Tags: ident.Tags{}} + Shard: shardn(4), ID: ident.StringID("noShardBootstrapRange")} // Make sure it handles multiple namespaces someOtherNamespace := ts.Series{UniqueIndex: 7, Namespace: testNamespaceID2, - Shard: shardn(0), ID: ident.StringID("series_OtherNamespace"), Tags: ident.Tags{}} + Shard: shardn(0), ID: ident.StringID("series_OtherNamespace")} valuesNs := testValues{ {foo, start, 1.0, xtime.Second, nil}, @@ -233,192 +230,3 @@ func TestBootstrapIndexEmptyShardTimeRanges(t *testing.T) { tester.EnsureNoLoadedBlocks() tester.EnsureNoWrites() } - -func verifyIndexResultsAreCorrect( - values testValues, - seriesNotToExpect map[string]struct{}, - indexResults result.IndexResults, - indexBlockSize time.Duration, -) error { - expectedIndexBlocks := map[xtime.UnixNano]map[string]map[string]string{} - for _, value := range values { - if _, shouldNotExpect := seriesNotToExpect[value.s.ID.String()]; shouldNotExpect { - continue - } - - indexBlockStart := value.t.Truncate(indexBlockSize) - expectedSeries, ok := expectedIndexBlocks[xtime.ToUnixNano(indexBlockStart)] - if !ok { - expectedSeries = map[string]map[string]string{} - expectedIndexBlocks[xtime.ToUnixNano(indexBlockStart)] = expectedSeries - } - - seriesID := string(value.s.ID.Bytes()) - - existingTags, ok := expectedSeries[seriesID] - if !ok { - existingTags = map[string]string{} - expectedSeries[seriesID] = existingTags - } - for _, tag := range value.s.Tags.Values() { - existingTags[tag.Name.String()] = tag.Value.String() - } - } - - for indexBlockStart, expectedSeries := range expectedIndexBlocks { - indexBlockByVolumeType, ok := indexResults[indexBlockStart] - if !ok { - return fmt.Errorf("missing index block: %v", indexBlockStart.ToTime().String()) - } - indexBlock, ok := indexBlockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType) - if !ok { - return fmt.Errorf("missing index block: %v", indexBlockStart.ToTime().String()) - } - - if indexBlock.Fulfilled().IsEmpty() { - return fmt.Errorf("index-block %v fulfilled is empty", indexBlockStart) - } - - for _, seg := range indexBlock.Segments() { - reader, err := seg.Reader() - if err != nil { - return err - } - - docs, err := reader.AllDocs() - if err != nil { - return err - } - - seenSeries := map[string]struct{}{} - for docs.Next() 
{ - curr := docs.Current() - - _, ok := seenSeries[string(curr.ID)] - if ok { - return fmt.Errorf( - "saw duplicate series: %v for block %v", - string(curr.ID), indexBlockStart.ToTime().String()) - } - seenSeries[string(curr.ID)] = struct{}{} - - expectedTags := expectedSeries[string(curr.ID)] - matchingTags := map[string]struct{}{} - for _, tag := range curr.Fields { - if _, ok := matchingTags[string(tag.Name)]; ok { - return fmt.Errorf("saw duplicate tag: %v for id: %v", tag.Name, string(curr.ID)) - } - matchingTags[string(tag.Name)] = struct{}{} - - tagValue, ok := expectedTags[string(tag.Name)] - if !ok { - return fmt.Errorf("saw unexpected tag: %v for id: %v", tag.Name, string(curr.ID)) - } - - if tagValue != string(tag.Value) { - return fmt.Errorf( - "tag values for series: %v do not match. Expected: %v but got: %v", - curr.ID, tagValue, string(tag.Value), - ) - } - } - - if len(expectedTags) != len(matchingTags) { - return fmt.Errorf( - "number of tags for series: %v do not match. Expected: %v, but got: %v", - string(curr.ID), len(expectedTags), len(matchingTags), - ) - } - } - - if docs.Err() != nil { - return docs.Err() - } - - if err := docs.Close(); err != nil { - return err - } - - if len(expectedSeries) != len(seenSeries) { - return fmt.Errorf( - "expected %v series, but got %v series", len(expectedSeries), len(seenSeries)) - } - } - } - - return nil -} - -func TestBootstrapIndexFailsForDecodedTags(t *testing.T) { - var ( - opts = testDefaultOpts - src = newCommitLogSource(opts, fs.Inspection{}).(*commitLogSource) - dataBlockSize = 2 * time.Hour - indexBlockSize = 4 * time.Hour - namespaceOptions = namespaceOptions. - SetRetentionOptions( - namespaceOptions. - RetentionOptions(). - SetBlockSize(dataBlockSize), - ). - SetIndexOptions( - namespaceOptions. - IndexOptions(). - SetBlockSize(indexBlockSize). - SetEnabled(true), - ) - ) - md1, err := namespace.NewMetadata(testNamespaceID, namespaceOptions) - require.NoError(t, err) - - now := time.Now() - start := now.Truncate(indexBlockSize) - - fooTags := ident.NewTags(ident.StringTag("city", "ny")) - - shardn := func(n int) uint32 { return uint32(n) } - foo := ts.Series{UniqueIndex: 0, Namespace: testNamespaceID, Shard: shardn(0), - ID: ident.StringID("foo"), Tags: fooTags} - - values := testValues{ - {foo, start, 1.0, xtime.Second, nil}, - } - - src.newIteratorFn = func( - _ commitlog.IteratorOpts, - ) (commitlog.Iterator, []commitlog.ErrorWithPath, error) { - return newTestCommitLogIterator(values, nil), nil, nil - } - - ranges := xtime.NewRanges( - xtime.Range{Start: start, End: start.Add(dataBlockSize)}, - xtime.Range{Start: start.Add(dataBlockSize), End: start.Add(2 * dataBlockSize)}, - xtime.Range{Start: start.Add(2 * dataBlockSize), End: start.Add(3 * dataBlockSize)}) - - // Don't include ranges for shard 4 as thats how we're testing the noShardBootstrapRange series. 
- targetRanges := result.NewShardTimeRanges().Set( - shardn(0), - ranges, - ).Set( - shardn(1), - ranges, - ).Set( - shardn(2), - ranges, - ).Set( - shardn(5), - ranges, - ) - - tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts, targetRanges, md1) - defer tester.Finish() - - ctx := context.NewContext() - defer ctx.Close() - - _, err = src.Read(ctx, tester.Namespaces) - require.Error(t, err) - - tester.EnsureNoLoadedBlocks() - tester.EnsureNoWrites() -} diff --git a/src/dbnode/storage/index_query_concurrent_test.go b/src/dbnode/storage/index_query_concurrent_test.go index 07bb9eac75..f7f1172367 100644 --- a/src/dbnode/storage/index_query_concurrent_test.go +++ b/src/dbnode/storage/index_query_concurrent_test.go @@ -347,7 +347,7 @@ func testNamespaceIndexHighConcurrentQueries( for _, entry := range results.Results.Map().Iter() { id := entry.Key().String() - doc, err := convert.FromMetricIter(entry.Key(), entry.Value()) + doc, err := convert.FromSeriesIDAndTagIter(entry.Key(), entry.Value()) require.NoError(t, err) if err != nil { continue // this will fail the test anyway, but don't want to panic From d44094010e9badb2586c1a53583c937dfc06532b Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Tue, 30 Jun 2020 21:12:10 -0400 Subject: [PATCH 26/37] Address feedback --- src/dbnode/persist/fs/write.go | 25 ++++++------- src/dbnode/persist/types.go | 37 +++++++++---------- src/dbnode/storage/database.go | 3 ++ .../storage/dirty_series_new_map_gen.go | 11 ++---- src/dbnode/storage/fs_merge_with_mem_test.go | 4 +- src/dbnode/storage/index.go | 3 +- src/dbnode/storage/index/convert/convert.go | 13 ++++--- src/dbnode/storage/index_insert_queue.go | 27 +++++++++----- src/dbnode/storage/namespace.go | 3 +- src/dbnode/storage/series/series.go | 4 ++ src/dbnode/storage/series/series_test.go | 5 +-- src/dbnode/storage/shard.go | 9 ++--- src/dbnode/storage/shard_test.go | 4 +- src/dbnode/storage/types.go | 4 ++ src/dbnode/ts/writes/write_batch_test.go | 1 - 15 files changed, 79 insertions(+), 74 deletions(-) diff --git a/src/dbnode/persist/fs/write.go b/src/dbnode/persist/fs/write.go index 3af518c812..4a992ac5b6 100644 --- a/src/dbnode/persist/fs/write.go +++ b/src/dbnode/persist/fs/write.go @@ -444,23 +444,22 @@ func (w *writer) writeIndexFileContents( sort.Sort(w.indexEntries) var ( - offset int64 - prevID []byte - opts = persist.MetadataTagIteratorOptions{ - ReuseableTagsIterator: w.tagsIterator, - } - tagsEncoder = w.tagEncoderPool.Get() + offset int64 + prevID []byte + tagsReuseable = w.tagsIterator + tagsEncoder = w.tagEncoderPool.Get() ) defer tagsEncoder.Finalize() - for i := range w.indexEntries { - id := w.indexEntries[i].metadata.BytesID() + for i, entry := range w.indexEntries { + metadata := entry.metadata + id := metadata.BytesID() // Need to check if i > 0 or we can never write an empty string ID if i > 0 && bytes.Equal(id, prevID) { // Should never happen, Write() should only be called once per ID return fmt.Errorf("encountered duplicate ID: %s", id) } - tagsIter, err := w.indexEntries[i].metadata.TagIterator(opts) + tagsIter, err := metadata.ResetOrReturnProvidedTagIterator(tagsReuseable) if err != nil { return err } @@ -481,11 +480,11 @@ func (w *writer) writeIndexFileContents( } entry := schema.IndexEntry{ - Index: w.indexEntries[i].index, + Index: entry.index, ID: id, - Size: int64(w.indexEntries[i].size), - Offset: w.indexEntries[i].dataFileOffset, - Checksum: int64(w.indexEntries[i].checksum), + Size: int64(entry.size), + Offset: entry.dataFileOffset, + Checksum: 
int64(entry.checksum), EncodedTags: encodedTags, } diff --git a/src/dbnode/persist/types.go b/src/dbnode/persist/types.go index 0e065b154d..0568e0099c 100644 --- a/src/dbnode/persist/types.go +++ b/src/dbnode/persist/types.go @@ -57,6 +57,10 @@ type MetadataOptions struct { } // NewMetadata returns a new metadata struct from series metadata. +// Note: because doc.Document has no pools for finalization we do not +// take MetadataOptions here; in the future, if we have pools or +// some other shared options that Metadata needs, we will add them to this +// constructor as well. func NewMetadata(metadata doc.Document) Metadata { return Metadata{metadata: metadata} } @@ -97,21 +101,14 @@ func (m Metadata) BytesID() []byte { return m.metadata.ID } -// MetadataTagIteratorOptions are options required to be passed -// to metadata TagIterator to retrieve a tag iterator. -type MetadataTagIteratorOptions struct { - ReuseableTagsIterator ident.TagsIterator -} - -// TagIterator returns a tag iterator for the series, -// returning a direct ref to a provided tag iterator -// or using the reuseable tag iterator provided by the +// ResetOrReturnProvidedTagIterator returns a tag iterator +// for the series, returning a direct ref to a provided tag +// iterator or using the reuseable tag iterator provided by the // callsite if it needs to iterate over tags or fields. -func (m Metadata) TagIterator( - opts MetadataTagIteratorOptions, +func (m Metadata) ResetOrReturnProvidedTagIterator( + reuseableTagsIterator ident.TagsIterator, ) (ident.TagIterator, error) { - reuseable := opts.ReuseableTagsIterator - if reuseable == nil { + if reuseableTagsIterator == nil { // Always check to make sure callsites won't // get a bad allocation pattern of having // to create one here inline if the metadata @@ -124,24 +121,24 @@ func (m Metadata) TagIterator( } if len(m.tags.Values()) > 0 { - reuseable.Reset(m.tags) - return reuseable, nil + reuseableTagsIterator.Reset(m.tags) + return reuseableTagsIterator, reuseableTagsIterator.Err() } - reuseable.ResetFields(m.metadata.Fields) - return reuseable, nil + reuseableTagsIterator.ResetFields(m.metadata.Fields) + return reuseableTagsIterator, reuseableTagsIterator.Err() } // Finalize will finalize any resources that requested // to be finalized. func (m Metadata) Finalize() { - if m.opts.FinalizeID { + if m.opts.FinalizeID && m.id != nil { m.id.Finalize() } - if m.opts.FinalizeTags { + if m.opts.FinalizeTags && m.tags.Values() != nil { m.tags.Finalize() } - if m.opts.FinalizeTagIterator { + if m.opts.FinalizeTagIterator && m.tagsIter != nil { m.tagsIter.Close() } } diff --git a/src/dbnode/storage/database.go b/src/dbnode/storage/database.go index da90615829..bccaa7d6f1 100644 --- a/src/dbnode/storage/database.go +++ b/src/dbnode/storage/database.go @@ -760,6 +760,9 @@ func (d *db) writeBatch( err := n.WritePendingIndexInserts(pending) if err != nil { // Mark those as pending index with an error. + // Note: this is an invariant error; queueing should never fail, + // so it's fine to fail all these entries if we can't + // write pending index inserts.
for i, write := range iter { if write.PendingIndex { errHandler.HandleError(write.OriginalIndex, err) diff --git a/src/dbnode/storage/dirty_series_new_map_gen.go b/src/dbnode/storage/dirty_series_new_map_gen.go index e72c083b6e..eb7a2a65cd 100644 --- a/src/dbnode/storage/dirty_series_new_map_gen.go +++ b/src/dbnode/storage/dirty_series_new_map_gen.go @@ -30,13 +30,8 @@ import ( "github.com/cespare/xxhash/v2" ) -// dirtySeriesMapOptions provides options used when created the map. -type dirtySeriesMapOptions struct { - InitialSize int -} - // newDirtySeriesMap returns a new byte keyed map. -func newDirtySeriesMap(opts dirtySeriesMapOptions) *dirtySeriesMap { +func newDirtySeriesMap() *dirtySeriesMap { return _dirtySeriesMapAlloc(_dirtySeriesMapOptions{ hash: func(k idAndBlockStart) dirtySeriesMapHash { hash := uint64(7) @@ -45,7 +40,8 @@ func newDirtySeriesMap(opts dirtySeriesMapOptions) *dirtySeriesMap { return dirtySeriesMapHash(hash) }, equals: func(x, y idAndBlockStart) bool { - return bytes.Equal(x.id, y.id) && x.blockStart == y.blockStart + // Note: Do cheap check (int comparison) first. + return x.blockStart == y.blockStart && bytes.Equal(x.id, y.id) }, copy: func(k idAndBlockStart) idAndBlockStart { return idAndBlockStart{ @@ -53,6 +49,5 @@ func newDirtySeriesMap(opts dirtySeriesMapOptions) *dirtySeriesMap { blockStart: k.blockStart, } }, - initialSize: opts.InitialSize, }) } diff --git a/src/dbnode/storage/fs_merge_with_mem_test.go b/src/dbnode/storage/fs_merge_with_mem_test.go index 1e64378245..3e982e1321 100644 --- a/src/dbnode/storage/fs_merge_with_mem_test.go +++ b/src/dbnode/storage/fs_merge_with_mem_test.go @@ -58,7 +58,7 @@ func TestRead(t *testing.T) { } retriever.EXPECT().RetrievableBlockColdVersion(gomock.Any()).Return(version, nil).AnyTimes() - dirtySeries := newDirtySeriesMap(dirtySeriesMapOptions{}) + dirtySeries := newDirtySeriesMap() dirtySeriesToWrite := make(map[xtime.UnixNano]*idList) data := []dirtyData{ @@ -141,7 +141,7 @@ func TestForEachRemaining(t *testing.T) { } retriever.EXPECT().RetrievableBlockColdVersion(gomock.Any()).Return(version, nil).AnyTimes() - dirtySeries := newDirtySeriesMap(dirtySeriesMapOptions{}) + dirtySeries := newDirtySeriesMap() dirtySeriesToWrite := make(map[xtime.UnixNano]*idList) id0 := ident.StringID("id0") diff --git a/src/dbnode/storage/index.go b/src/dbnode/storage/index.go index bced7113f4..c822756779 100644 --- a/src/dbnode/storage/index.go +++ b/src/dbnode/storage/index.go @@ -576,8 +576,7 @@ func (i *nsIndex) WritePending( if !i.isOpenWithRLock() { i.state.RUnlock() i.metrics.insertAfterClose.Inc(1) - err := errDbIndexUnableToWriteClosed - return err + return errDbIndexUnableToWriteClosed } _, err := i.state.insertQueue.InsertPending(pending) // release the lock because we don't need it past this point. diff --git a/src/dbnode/storage/index/convert/convert.go b/src/dbnode/storage/index/convert/convert.go index bf0d659bbf..7da0228e8e 100644 --- a/src/dbnode/storage/index/convert/convert.go +++ b/src/dbnode/storage/index/convert/convert.go @@ -49,12 +49,13 @@ var ( // Validate returns a bool indicating whether the document is valid. 
func Validate(d doc.Document) error { if !utf8.Valid(d.ID) { - return fmt.Errorf("document has invalid ID: id=%v, id_hex=%x", d.ID, d.ID) + return fmt.Errorf("document has invalid non-UTF8 ID: id=%v, id_hex=%x", + d.ID, d.ID) } for _, f := range d.Fields { if !utf8.Valid(f.Name) { - return fmt.Errorf("document has invalid field name: name=%v, name_hex=%x", + return fmt.Errorf("document has invalid non-UTF8 field name: name=%v, name_hex=%x", f.Name, f.Name) } @@ -63,7 +64,7 @@ func Validate(d doc.Document) error { } if !utf8.Valid(f.Value) { - return fmt.Errorf("document has invalid field value: value=%v, value_hex=%x", + return fmt.Errorf("document has invalid non-UTF8 field value: value=%v, value_hex=%x", f.Value, f.Value) } } @@ -74,7 +75,7 @@ func Validate(d doc.Document) error { // ValidateSeries will validate a series for use with m3ninx. func ValidateSeries(id ident.ID, tags ident.Tags) error { if idBytes := id.Bytes(); !utf8.Valid(idBytes) { - return fmt.Errorf("series has invalid ID: id=%s, id_hex=%x", + return fmt.Errorf("series has invalid non-UTF8 ID: id=%s, id_hex=%x", idBytes, idBytes) } for _, tag := range tags.Values() { @@ -93,11 +94,11 @@ func ValidateSeriesTag(tag ident.Tag) error { return ErrUsingReservedFieldName } if !utf8.Valid(tagName) { - return fmt.Errorf("series contains invalid field name: "+ + return fmt.Errorf("series contains invalid non-UTF8 field name: "+ "field=%s, field_hex=%v", tagName, tagName) } if !utf8.Valid(tagValue) { - return fmt.Errorf("series contains invalid field value: "+ + return fmt.Errorf("series contains invalid non-UTF8 field value: "+ "field=%s, field_value=%s, field_value_hex=%x", tagName, tagValue, tagValue) } diff --git a/src/dbnode/storage/index_insert_queue.go b/src/dbnode/storage/index_insert_queue.go index 70f9053dd5..1e0dd9209d 100644 --- a/src/dbnode/storage/index_insert_queue.go +++ b/src/dbnode/storage/index_insert_queue.go @@ -174,18 +174,21 @@ func (q *nsIndexInsertQueue) InsertBatch( ) (*sync.WaitGroup, error) { batchLen := batch.Len() - // Choose the queue relevant to current CPU index + // Choose the queue relevant to current CPU index. + // Note: since inserts by CPU core is allocated when + // nsIndexInsertBatch is constructed and then never modified + // it is safe to concurrently read (but not modify obviously). inserts := q.currBatch.insertsByCPUCore[xsync.CPUCore()] inserts.Lock() inserts.shardInserts = append(inserts.shardInserts, batch) wg := inserts.wg inserts.Unlock() - // Notify insert loop + // Notify insert loop. select { case q.notifyInsert <- struct{}{}: default: - // Loop busy, already ready to consume notification + // Loop busy, already ready to consume notification. } q.metrics.numPending.Inc(int64(batchLen)) @@ -197,18 +200,21 @@ func (q *nsIndexInsertQueue) InsertPending( ) (*sync.WaitGroup, error) { batchLen := len(pending) - // Choose the queue relevant to current CPU index + // Choose the queue relevant to current CPU index. + // Note: since inserts by CPU core is allocated when + // nsIndexInsertBatch is constructed and then never modified + // it is safe to concurrently read (but not modify obviously). inserts := q.currBatch.insertsByCPUCore[xsync.CPUCore()] inserts.Lock() inserts.batchInserts = append(inserts.batchInserts, pending...) wg := inserts.wg inserts.Unlock() - // Notify insert loop + // Notify insert loop. select { case q.notifyInsert <- struct{}{}: default: - // Loop busy, already ready to consume notification + // Loop busy, already ready to consume notification.
} q.metrics.numPending.Inc(int64(batchLen)) @@ -255,9 +261,12 @@ func (q *nsIndexInsertQueue) Stop() error { type nsIndexInsertBatchFn func(inserts *index.WriteBatch) type nsIndexInsertBatch struct { - namespace namespace.Metadata - nowFn clock.NowFn - wg *sync.WaitGroup + namespace namespace.Metadata + nowFn clock.NowFn + wg *sync.WaitGroup + // Note: since inserts by CPU core is allocated when + // nsIndexInsertBatch is constructed and then never modified + // it is safe to concurrently read (but not modify obviously). insertsByCPUCore []*nsIndexInsertsByCPUCore allInserts *index.WriteBatch allInsertsLastReset time.Time diff --git a/src/dbnode/storage/namespace.go b/src/dbnode/storage/namespace.go index b5d75c2033..b5b84bf8ac 100644 --- a/src/dbnode/storage/namespace.go +++ b/src/dbnode/storage/namespace.go @@ -1136,8 +1136,7 @@ func newColdFlushReuseableResources(opts Options) (coldFlushReuseableResources, } return coldFlushReuseableResources{ - // TODO(juchan): consider setting these options. - dirtySeries: newDirtySeriesMap(dirtySeriesMapOptions{}), + dirtySeries: newDirtySeriesMap(), dirtySeriesToWrite: make(map[xtime.UnixNano]*idList), // TODO(juchan): set pool options. idElementPool: newIDElementPool(nil), diff --git a/src/dbnode/storage/series/series.go b/src/dbnode/storage/series/series.go index d56dd20063..133d8df998 100644 --- a/src/dbnode/storage/series/series.go +++ b/src/dbnode/storage/series/series.go @@ -64,6 +64,10 @@ type dbSeries struct { // series metadata before changing ownership semantics (e.g. // pooling the ID rather than releasing it to the GC on // calling series.Reset()). + // Note: The bytes that back "id ident.ID" are the same bytes + // that are behind the ID in "metadata doc.Document"; the whole + // reason we keep an ident.ID on the series is that a lot + // of existing callsites require the ID as an ident.ID. id ident.ID metadata doc.Document uniqueIndex uint64 diff --git a/src/dbnode/storage/series/series_test.go b/src/dbnode/storage/series/series_test.go index 74e3fceac4..3d8476bfa8 100644 --- a/src/dbnode/storage/series/series_test.go +++ b/src/dbnode/storage/series/series_test.go @@ -27,14 +27,13 @@ import ( "testing" "time" - "github.com/m3db/m3/src/dbnode/persist" - "github.com/m3db/m3/src/dbnode/storage/index/convert" - "github.com/m3db/m3/src/dbnode/clock" "github.com/m3db/m3/src/dbnode/encoding" "github.com/m3db/m3/src/dbnode/encoding/m3tsz" + "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/retention" "github.com/m3db/m3/src/dbnode/storage/block" + "github.com/m3db/m3/src/dbnode/storage/index/convert" "github.com/m3db/m3/src/dbnode/ts" "github.com/m3db/m3/src/dbnode/x/xio" "github.com/m3db/m3/src/x/context" diff --git a/src/dbnode/storage/shard.go b/src/dbnode/storage/shard.go index eaa7a27b22..c4ecb427bb 100644 --- a/src/dbnode/storage/shard.go +++ b/src/dbnode/storage/shard.go @@ -1028,17 +1028,14 @@ func (s *dbShard) SeriesReadWriteRef( }, nil } - // BEFORE MERGE: CONSIDER BELOW FOR NOW OR FOLLOWUP - // TODO(r): Probably can't insert series sync otherwise we stall a ton - // of writes... need a better solution for bootstrapping. - // This is what causes writes to degrade during bootstrap. - // This is a note to consider before merging this PR. - // NB(r): Insert synchronously so caller has access to the series // immediately, otherwise calls to LoadBlock(..)
etc on the series itself // may have no effect if a collision with the same series // being put in the insert queue may cause a block to be loaded to a // series which gets discarded. + // TODO(r): Probably can't insert series sync otherwise we stall a ton + // of writes... need a better solution for bootstrapping. + // This is what causes writes to degrade during bootstrap. at := s.nowFn() entry, err = s.insertSeriesSync(id, newTagsIterArg(tags), insertSyncOptions{ insertType: insertSyncIncReaderWriterCount, diff --git a/src/dbnode/storage/shard_test.go b/src/dbnode/storage/shard_test.go index 9248eafa63..a1c4501f49 100644 --- a/src/dbnode/storage/shard_test.go +++ b/src/dbnode/storage/shard_test.go @@ -621,7 +621,7 @@ func TestShardColdFlush(t *testing.T) { preparer := persist.NewMockFlushPreparer(ctrl) fsReader := fs.NewMockDataFileSetReader(ctrl) resources := coldFlushReuseableResources{ - dirtySeries: newDirtySeriesMap(dirtySeriesMapOptions{}), + dirtySeries: newDirtySeriesMap(), dirtySeriesToWrite: make(map[xtime.UnixNano]*idList), idElementPool: newIDElementPool(nil), fsReader: fsReader, @@ -694,7 +694,7 @@ func TestShardColdFlushNoMergeIfNothingDirty(t *testing.T) { dirtySeriesToWrite[xtime.ToUnixNano(t3)] = newIDList(idElementPool) resources := coldFlushReuseableResources{ - dirtySeries: newDirtySeriesMap(dirtySeriesMapOptions{}), + dirtySeries: newDirtySeriesMap(), dirtySeriesToWrite: dirtySeriesToWrite, idElementPool: idElementPool, fsReader: fsReader, diff --git a/src/dbnode/storage/types.go b/src/dbnode/storage/types.go index 9cd3ba2277..4068e69f19 100644 --- a/src/dbnode/storage/types.go +++ b/src/dbnode/storage/types.go @@ -704,6 +704,10 @@ type namespaceIndexInsertQueue interface { batch *index.WriteBatch, ) (*sync.WaitGroup, error) + // InsertPending inserts the provided documents to the index queue which processes + // inserts to the index asynchronously. It executes the provided callbacks + // based on the result of the execution. The returned wait group can be used + // if the insert is required to be synchronous. InsertPending( pending []writes.PendingIndexInsert, ) (*sync.WaitGroup, error) diff --git a/src/dbnode/ts/writes/write_batch_test.go b/src/dbnode/ts/writes/write_batch_test.go index e956cef5ef..7f5e7976ee 100644 --- a/src/dbnode/ts/writes/write_batch_test.go +++ b/src/dbnode/ts/writes/write_batch_test.go @@ -212,7 +212,6 @@ func TestBatchWriterSetSeries(t *testing.T) { } require.Equal(t, fmt.Sprint(i), string(currSeries.ID.String())) - require.True(t, ident.StringID(fmt.Sprint(i)).Equal(currSeries.ID)) require.False(t, curr.SkipWrite) require.NoError(t, curr.Err) From f5e1fae9fe8970ecefeae909181166955cd8a502 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Tue, 30 Jun 2020 21:25:45 -0400 Subject: [PATCH 27/37] Fix mock --- src/query/cost/cost_mock.go | 156 +++++++++++++------------- src/query/generated/mocks/generate.go | 2 +- 2 files changed, 79 insertions(+), 79 deletions(-) diff --git a/src/query/cost/cost_mock.go b/src/query/cost/cost_mock.go index b0d92fae56..85abd1379b 100644 --- a/src/query/cost/cost_mock.go +++ b/src/query/cost/cost_mock.go @@ -1,7 +1,7 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/m3db/m3/src/query/cost/go +// Source: github.com/m3db/m3/src/query/cost (interfaces: ChainedEnforcer,ChainedReporter) -// Copyright (c) 2019 Uber Technologies, Inc. +// Copyright (c) 2020 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -56,46 +56,31 @@ func (m *MockChainedEnforcer) EXPECT() *MockChainedEnforcerMockRecorder { } // Add mocks base method -func (m *MockChainedEnforcer) Add(op cost0.Cost) cost0.Report { +func (m *MockChainedEnforcer) Add(arg0 cost0.Cost) cost0.Report { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Add", op) + ret := m.ctrl.Call(m, "Add", arg0) ret0, _ := ret[0].(cost0.Report) return ret0 } // Add indicates an expected call of Add -func (mr *MockChainedEnforcerMockRecorder) Add(op interface{}) *gomock.Call { +func (mr *MockChainedEnforcerMockRecorder) Add(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockChainedEnforcer)(nil).Add), op) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockChainedEnforcer)(nil).Add), arg0) } -// State mocks base method -func (m *MockChainedEnforcer) State() (cost0.Report, cost0.Limit) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "State") - ret0, _ := ret[0].(cost0.Report) - ret1, _ := ret[1].(cost0.Limit) - return ret0, ret1 -} - -// State indicates an expected call of State -func (mr *MockChainedEnforcerMockRecorder) State() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "State", reflect.TypeOf((*MockChainedEnforcer)(nil).State)) -} - -// Limit mocks base method -func (m *MockChainedEnforcer) Limit() cost0.Limit { +// Child mocks base method +func (m *MockChainedEnforcer) Child(arg0 string) ChainedEnforcer { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Limit") - ret0, _ := ret[0].(cost0.Limit) + ret := m.ctrl.Call(m, "Child", arg0) + ret0, _ := ret[0].(ChainedEnforcer) return ret0 } -// Limit indicates an expected call of Limit -func (mr *MockChainedEnforcerMockRecorder) Limit() *gomock.Call { +// Child indicates an expected call of Child +func (mr *MockChainedEnforcerMockRecorder) Child(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Limit", reflect.TypeOf((*MockChainedEnforcer)(nil).Limit)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Child", reflect.TypeOf((*MockChainedEnforcer)(nil).Child), arg0) } // Clone mocks base method @@ -112,44 +97,59 @@ func (mr *MockChainedEnforcerMockRecorder) Clone() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clone", reflect.TypeOf((*MockChainedEnforcer)(nil).Clone)) } -// Reporter mocks base method -func (m *MockChainedEnforcer) Reporter() cost0.EnforcerReporter { +// Close mocks base method +func (m *MockChainedEnforcer) Close() { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Reporter") - ret0, _ := ret[0].(cost0.EnforcerReporter) + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close +func (mr *MockChainedEnforcerMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockChainedEnforcer)(nil).Close)) +} + +// Limit mocks base method +func (m *MockChainedEnforcer) Limit() cost0.Limit { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Limit") + ret0, _ := ret[0].(cost0.Limit) return ret0 } -// Reporter indicates an expected call of Reporter -func (mr *MockChainedEnforcerMockRecorder) Reporter() *gomock.Call { +// Limit indicates an expected call of Limit +func (mr 
*MockChainedEnforcerMockRecorder) Limit() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reporter", reflect.TypeOf((*MockChainedEnforcer)(nil).Reporter)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Limit", reflect.TypeOf((*MockChainedEnforcer)(nil).Limit)) } -// Child mocks base method -func (m *MockChainedEnforcer) Child(resourceName string) ChainedEnforcer { +// Reporter mocks base method +func (m *MockChainedEnforcer) Reporter() cost0.EnforcerReporter { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Child", resourceName) - ret0, _ := ret[0].(ChainedEnforcer) + ret := m.ctrl.Call(m, "Reporter") + ret0, _ := ret[0].(cost0.EnforcerReporter) return ret0 } -// Child indicates an expected call of Child -func (mr *MockChainedEnforcerMockRecorder) Child(resourceName interface{}) *gomock.Call { +// Reporter indicates an expected call of Reporter +func (mr *MockChainedEnforcerMockRecorder) Reporter() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Child", reflect.TypeOf((*MockChainedEnforcer)(nil).Child), resourceName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reporter", reflect.TypeOf((*MockChainedEnforcer)(nil).Reporter)) } -// Close mocks base method -func (m *MockChainedEnforcer) Close() { +// State mocks base method +func (m *MockChainedEnforcer) State() (cost0.Report, cost0.Limit) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Close") + ret := m.ctrl.Call(m, "State") + ret0, _ := ret[0].(cost0.Report) + ret1, _ := ret[1].(cost0.Limit) + return ret0, ret1 } -// Close indicates an expected call of Close -func (mr *MockChainedEnforcerMockRecorder) Close() *gomock.Call { +// State indicates an expected call of State +func (mr *MockChainedEnforcerMockRecorder) State() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockChainedEnforcer)(nil).Close)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "State", reflect.TypeOf((*MockChainedEnforcer)(nil).State)) } // MockChainedReporter is a mock of ChainedReporter interface @@ -175,62 +175,62 @@ func (m *MockChainedReporter) EXPECT() *MockChainedReporterMockRecorder { return m.recorder } -// ReportCost mocks base method -func (m *MockChainedReporter) ReportCost(c cost0.Cost) { +// OnChildClose mocks base method +func (m *MockChainedReporter) OnChildClose(arg0 cost0.Cost) { m.ctrl.T.Helper() - m.ctrl.Call(m, "ReportCost", c) + m.ctrl.Call(m, "OnChildClose", arg0) } -// ReportCost indicates an expected call of ReportCost -func (mr *MockChainedReporterMockRecorder) ReportCost(c interface{}) *gomock.Call { +// OnChildClose indicates an expected call of OnChildClose +func (mr *MockChainedReporterMockRecorder) OnChildClose(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportCost", reflect.TypeOf((*MockChainedReporter)(nil).ReportCost), c) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnChildClose", reflect.TypeOf((*MockChainedReporter)(nil).OnChildClose), arg0) } -// ReportCurrent mocks base method -func (m *MockChainedReporter) ReportCurrent(c cost0.Cost) { +// OnClose mocks base method +func (m *MockChainedReporter) OnClose(arg0 cost0.Cost) { m.ctrl.T.Helper() - m.ctrl.Call(m, "ReportCurrent", c) + m.ctrl.Call(m, "OnClose", arg0) } -// ReportCurrent indicates an expected call of ReportCurrent -func (mr *MockChainedReporterMockRecorder) ReportCurrent(c interface{}) *gomock.Call { +// 
OnClose indicates an expected call of OnClose +func (mr *MockChainedReporterMockRecorder) OnClose(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportCurrent", reflect.TypeOf((*MockChainedReporter)(nil).ReportCurrent), c) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnClose", reflect.TypeOf((*MockChainedReporter)(nil).OnClose), arg0) } -// ReportOverLimit mocks base method -func (m *MockChainedReporter) ReportOverLimit(enabled bool) { +// ReportCost mocks base method +func (m *MockChainedReporter) ReportCost(arg0 cost0.Cost) { m.ctrl.T.Helper() - m.ctrl.Call(m, "ReportOverLimit", enabled) + m.ctrl.Call(m, "ReportCost", arg0) } -// ReportOverLimit indicates an expected call of ReportOverLimit -func (mr *MockChainedReporterMockRecorder) ReportOverLimit(enabled interface{}) *gomock.Call { +// ReportCost indicates an expected call of ReportCost +func (mr *MockChainedReporterMockRecorder) ReportCost(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportOverLimit", reflect.TypeOf((*MockChainedReporter)(nil).ReportOverLimit), enabled) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportCost", reflect.TypeOf((*MockChainedReporter)(nil).ReportCost), arg0) } -// OnChildClose mocks base method -func (m *MockChainedReporter) OnChildClose(currentCost cost0.Cost) { +// ReportCurrent mocks base method +func (m *MockChainedReporter) ReportCurrent(arg0 cost0.Cost) { m.ctrl.T.Helper() - m.ctrl.Call(m, "OnChildClose", currentCost) + m.ctrl.Call(m, "ReportCurrent", arg0) } -// OnChildClose indicates an expected call of OnChildClose -func (mr *MockChainedReporterMockRecorder) OnChildClose(currentCost interface{}) *gomock.Call { +// ReportCurrent indicates an expected call of ReportCurrent +func (mr *MockChainedReporterMockRecorder) ReportCurrent(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnChildClose", reflect.TypeOf((*MockChainedReporter)(nil).OnChildClose), currentCost) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportCurrent", reflect.TypeOf((*MockChainedReporter)(nil).ReportCurrent), arg0) } -// OnClose mocks base method -func (m *MockChainedReporter) OnClose(currentCost cost0.Cost) { +// ReportOverLimit mocks base method +func (m *MockChainedReporter) ReportOverLimit(arg0 bool) { m.ctrl.T.Helper() - m.ctrl.Call(m, "OnClose", currentCost) + m.ctrl.Call(m, "ReportOverLimit", arg0) } -// OnClose indicates an expected call of OnClose -func (mr *MockChainedReporterMockRecorder) OnClose(currentCost interface{}) *gomock.Call { +// ReportOverLimit indicates an expected call of ReportOverLimit +func (mr *MockChainedReporterMockRecorder) ReportOverLimit(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnClose", reflect.TypeOf((*MockChainedReporter)(nil).OnClose), currentCost) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportOverLimit", reflect.TypeOf((*MockChainedReporter)(nil).ReportOverLimit), arg0) } diff --git a/src/query/generated/mocks/generate.go b/src/query/generated/mocks/generate.go index 9f0a606f42..b92d9a6d98 100644 --- a/src/query/generated/mocks/generate.go +++ b/src/query/generated/mocks/generate.go @@ -27,11 +27,11 @@ //go:generate sh -c "mockgen -package=ingest -destination=$GOPATH/src/$PACKAGE/src/cmd/services/m3coordinator/ingest/write_mock.go 
$PACKAGE/src/cmd/services/m3coordinator/ingest DownsamplerAndWriter" //go:generate sh -c "mockgen -package=transform -destination=$GOPATH/src/$PACKAGE/src/query/executor/transform/types_mock.go $PACKAGE/src/query/executor/transform OpNode" //go:generate sh -c "mockgen -package=executor -destination=$GOPATH/src/$PACKAGE/src/query/executor/types_mock.go $PACKAGE/src/query/executor Engine" +//go:generate sh -c "mockgen -package=cost -destination=$GOPATH/src/github.com/m3db/m3/src/query/cost/cost_mock.go $PACKAGE/src/query/cost ChainedEnforcer,ChainedReporter" // mockgen rules for generating mocks for unexported interfaces (file mode). //go:generate sh -c "mockgen -package=m3ql -destination=$GOPATH/src/github.com/m3db/m3/src/query/parser/m3ql/types_mock.go -source=$GOPATH/src/github.com/m3db/m3/src/query/parser/m3ql/types.go" //go:generate sh -c "mockgen -package=transform -destination=$GOPATH/src/github.com/m3db/m3/src/query/executor/transform/exec_mock.go -source=$GOPATH/src/github.com/m3db/m3/src/query/executor/transform/exec.go" //go:generate sh -c "mockgen -package=temporal -destination=$GOPATH/src/github.com/m3db/m3/src/query/functions/temporal/dependencies_mock.go -source=$GOPATH/src/github.com/m3db/m3/src/query/functions/temporal/dependencies.go" controller -//go:generate sh -c "mockgen -package=cost -destination=$GOPATH/src/github.com/m3db/m3/src/query/cost/cost_mock.go -source=$GOPATH/src/github.com/m3db/m3/src/query/cost/cost.go" package mocks From 4a6805e0b426fe66d6bc39ca4fad1c2ed0ea2c4a Mon Sep 17 00:00:00 2001 From: Bo Du Date: Tue, 30 Jun 2020 21:27:01 -0400 Subject: [PATCH 28/37] Fix more tests. --- src/dbnode/persist/fs/clone/cloner_test.go | 5 +- .../persist/fs/commitlog/commit_log_test.go | 93 +++++++++++-------- 2 files changed, 59 insertions(+), 39 deletions(-) diff --git a/src/dbnode/persist/fs/clone/cloner_test.go b/src/dbnode/persist/fs/clone/cloner_test.go index b685b152e9..1ea8a8b447 100644 --- a/src/dbnode/persist/fs/clone/cloner_test.go +++ b/src/dbnode/persist/fs/clone/cloner_test.go @@ -29,6 +29,7 @@ import ( "testing" "time" + "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/persist/fs" "github.com/m3db/m3/src/x/checked" "github.com/m3db/m3/src/x/ident" @@ -154,7 +155,9 @@ func writeTestData(t *testing.T, bs time.Duration, src FileSetID, opts Options) ident.StringTag("qux", "qaz"), ) } - require.NoError(t, w.Write(id, tags, testBytes, 1234)) + metadata := persist.NewMetadataFromIDAndTags(id, tags, + persist.MetadataOptions{}) + require.NoError(t, w.Write(metadata, testBytes, 1234)) } require.NoError(t, w.Close()) } diff --git a/src/dbnode/persist/fs/commitlog/commit_log_test.go b/src/dbnode/persist/fs/commitlog/commit_log_test.go index e661d2caaf..e56f8d7737 100644 --- a/src/dbnode/persist/fs/commitlog/commit_log_test.go +++ b/src/dbnode/persist/fs/commitlog/commit_log_test.go @@ -21,6 +21,7 @@ package commitlog import ( + "bytes" "errors" "fmt" "io/ioutil" @@ -37,6 +38,7 @@ import ( "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/persist/fs" "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/ts/writes" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" xtime "github.com/m3db/m3/src/x/time" @@ -134,16 +136,31 @@ type testWrite struct { } func testSeries( + t *testing.T, + opts Options, uniqueIndex uint64, id string, tags ident.Tags, shard uint32, ) ts.Series { + var ( + tagEncoderPool = opts.FilesystemOptions().TagEncoderPool() + tagSliceIter = ident.NewTagsIterator(ident.Tags{}) + ) + 
tagSliceIter.Reset(tags) + + tagEncoder := tagEncoderPool.Get() + err := tagEncoder.Encode(tagSliceIter) + require.NoError(t, err) + + encodedTagsChecked, ok := tagEncoder.Data() + require.True(t, ok) + return ts.Series{ UniqueIndex: uniqueIndex, Namespace: ident.StringID("testNS"), ID: ident.StringID(id), - Tags: tags, + EncodedTags: ts.EncodedTags(encodedTagsChecked.Bytes()), Shard: shard, } } @@ -160,7 +177,7 @@ func (w testWrite) assert( require.Equal(t, w.series.Shard, series.Shard) // ident.Tags.Equal will compare length - require.True(t, w.series.Tags.Equal(series.Tags)) + require.True(t, bytes.Equal(w.series.EncodedTags, series.EncodedTags)) require.True(t, w.t.Equal(datapoint.Timestamp)) require.Equal(t, w.v, datapoint.Value) @@ -368,47 +385,47 @@ func TestCommitLogWrite(t *testing.T) { { "Attempt to perform 2 write log writes in parallel to a commit log", []testWrite{ - {testSeries(0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, []byte{1, 2, 3}, nil}, - {testSeries(1, "foo.baz", ident.NewTags(ident.StringTag("name2", "val2")), 150), time.Now(), 456.789, xtime.Second, nil, nil}, + {testSeries(t, opts, 0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, []byte{1, 2, 3}, nil}, + {testSeries(t, opts, 1, "foo.baz", ident.NewTags(ident.StringTag("name2", "val2")), 150), time.Now(), 456.789, xtime.Second, nil, nil}, }, }, { "Buffer almost full after first write. Second write almost fills the buffer", []testWrite{ - {testSeries(0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(opts.FlushSize() - 200), nil}, - {testSeries(1, "foo.baz", ident.NewTags(ident.StringTag("name2", "val2")), 150), time.Now(), 456.789, xtime.Second, randomByteSlice(40), nil}, + {testSeries(t, opts, 0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(opts.FlushSize() - 200), nil}, + {testSeries(t, opts, 1, "foo.baz", ident.NewTags(ident.StringTag("name2", "val2")), 150), time.Now(), 456.789, xtime.Second, randomByteSlice(40), nil}, }, }, { "Buffer almost full after first write. Second write almost fills 2*buffer total", []testWrite{ - {testSeries(0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(opts.FlushSize() - 200), nil}, - {testSeries(1, "foo.baz", ident.NewTags(ident.StringTag("name2", "val2")), 150), time.Now(), 456.789, xtime.Second, randomByteSlice(40 + opts.FlushSize()), nil}, + {testSeries(t, opts, 0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(opts.FlushSize() - 200), nil}, + {testSeries(t, opts, 1, "foo.baz", ident.NewTags(ident.StringTag("name2", "val2")), 150), time.Now(), 456.789, xtime.Second, randomByteSlice(40 + opts.FlushSize()), nil}, }, }, { "Buffer almost full after first write. 
Second write almost fills 3*buffer total", []testWrite{ - {testSeries(0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(opts.FlushSize() - 200), nil}, - {testSeries(1, "foo.baz", ident.NewTags(ident.StringTag("name2", "val2")), 150), time.Now(), 456.789, xtime.Second, randomByteSlice(40 + 2*opts.FlushSize()), nil}, + {testSeries(t, opts, 0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(opts.FlushSize() - 200), nil}, + {testSeries(t, opts, 1, "foo.baz", ident.NewTags(ident.StringTag("name2", "val2")), 150), time.Now(), 456.789, xtime.Second, randomByteSlice(40 + 2*opts.FlushSize()), nil}, }, }, { "Attempts to perform a write equal to the flush size", []testWrite{ - {testSeries(0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(opts.FlushSize()), nil}, + {testSeries(t, opts, 0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(opts.FlushSize()), nil}, }, }, { "Attempts to perform a write double the flush size", []testWrite{ - {testSeries(0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(2 * opts.FlushSize()), nil}, + {testSeries(t, opts, 0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(2 * opts.FlushSize()), nil}, }, }, { "Attempts to perform a write three times the flush size", []testWrite{ - {testSeries(0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(3 * opts.FlushSize()), nil}, + {testSeries(t, opts, 0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(3 * opts.FlushSize()), nil}, }, }, } @@ -457,7 +474,7 @@ func TestReadCommitLogMissingMetadata(t *testing.T) { allSeries := []ts.Series{} for i := 0; i < 200; i++ { willNotHaveMetadata := !(i%2 == 0) - allSeries = append(allSeries, testSeries( + allSeries = append(allSeries, testSeries(t, opts, uint64(i), "hax", ident.NewTags(ident.StringTag("name", "val")), @@ -515,8 +532,8 @@ func TestCommitLogReaderIsNotReusable(t *testing.T) { commitLog := newTestCommitLog(t, opts) writes := []testWrite{ - {testSeries(0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Second, []byte{1, 2, 3}, nil}, - {testSeries(1, "foo.baz", testTags2, 150), time.Now(), 456.789, xtime.Second, nil, nil}, + {testSeries(t, opts, 0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Second, []byte{1, 2, 3}, nil}, + {testSeries(t, opts, 1, "foo.baz", testTags2, 150), time.Now(), 456.789, xtime.Second, nil, nil}, } // Call write sync @@ -553,9 +570,9 @@ func TestCommitLogIteratorUsesPredicateFilterForNonCorruptFiles(t *testing.T) { // Writes spaced apart by block size. 
writes := []testWrite{ - {testSeries(0, "foo.bar", testTags1, 127), start, 123.456, xtime.Millisecond, nil, nil}, - {testSeries(1, "foo.baz", testTags2, 150), start.Add(1 * time.Second), 456.789, xtime.Millisecond, nil, nil}, - {testSeries(2, "foo.qux", testTags3, 291), start.Add(2 * time.Second), 789.123, xtime.Millisecond, nil, nil}, + {testSeries(t, opts, 0, "foo.bar", testTags1, 127), start, 123.456, xtime.Millisecond, nil, nil}, + {testSeries(t, opts, 1, "foo.baz", testTags2, 150), start.Add(1 * time.Second), 456.789, xtime.Millisecond, nil, nil}, + {testSeries(t, opts, 2, "foo.qux", testTags3, 291), start.Add(2 * time.Second), 789.123, xtime.Millisecond, nil, nil}, } defer cleanup(t, opts) @@ -671,9 +688,9 @@ func TestCommitLogWriteBehind(t *testing.T) { commitLog := newTestCommitLog(t, opts) writes := []testWrite{ - {testSeries(0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil}, - {testSeries(1, "foo.baz", testTags2, 150), time.Now(), 456.789, xtime.Millisecond, nil, nil}, - {testSeries(2, "foo.qux", testTags3, 291), time.Now(), 789.123, xtime.Millisecond, nil, nil}, + {testSeries(t, opts, 0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil}, + {testSeries(t, opts, 1, "foo.baz", testTags2, 150), time.Now(), 456.789, xtime.Millisecond, nil, nil}, + {testSeries(t, opts, 2, "foo.qux", testTags3, 291), time.Now(), 789.123, xtime.Millisecond, nil, nil}, } // Call write behind @@ -693,7 +710,7 @@ func TestCommitLogWriteErrorOnClosed(t *testing.T) { commitLog := newTestCommitLog(t, opts) require.NoError(t, commitLog.Close()) - series := testSeries(0, "foo.bar", testTags1, 127) + series := testSeries(t, opts, 0, "foo.bar", testTags1, 127) datapoint := ts.Datapoint{Timestamp: time.Now(), Value: 123.456} ctx := context.NewContext() @@ -719,7 +736,7 @@ func TestCommitLogWriteErrorOnFull(t *testing.T) { // Test filling queue var writes []testWrite - series := testSeries(0, "foo.bar", testTags1, 127) + series := testSeries(t, opts, 0, "foo.bar", testTags1, 127) dp := ts.Datapoint{Timestamp: time.Now(), Value: 123.456} unit := xtime.Millisecond @@ -762,7 +779,7 @@ func TestCommitLogQueueLength(t *testing.T) { defer commitLog.Close() var ( - series = testSeries(0, "foo.bar", testTags1, 127) + series = testSeries(t, opts, 0, "foo.bar", testTags1, 127) dp = ts.Datapoint{Timestamp: time.Now(), Value: 123.456} unit = xtime.Millisecond ctx = context.NewContext() @@ -819,7 +836,7 @@ func TestCommitLogFailOnWriteError(t *testing.T) { wg := setupCloseOnFail(t, commitLog) writes := []testWrite{ - {testSeries(0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil}, + {testSeries(t, opts, 0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil}, } writeCommitLogs(t, scope, commitLog, writes) @@ -868,7 +885,7 @@ func TestCommitLogFailOnOpenError(t *testing.T) { wg := setupCloseOnFail(t, commitLog) writes := []testWrite{ - {testSeries(0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil}, + {testSeries(t, opts, 0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil}, } writeCommitLogs(t, scope, commitLog, writes) @@ -924,7 +941,7 @@ func TestCommitLogFailOnFlushError(t *testing.T) { wg := setupCloseOnFail(t, commitLog) writes := []testWrite{ - {testSeries(0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil}, + {testSeries(t, opts, 0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil}, } 
writeCommitLogs(t, scope, commitLog, writes) @@ -987,9 +1004,9 @@ func TestCommitLogRotateLogs(t *testing.T) { // Writes spaced such that they should appear within the same commitlog block. writes := []testWrite{ - {testSeries(0, "foo.bar", testTags1, 127), start, 123.456, xtime.Millisecond, nil, nil}, - {testSeries(1, "foo.baz", testTags2, 150), start.Add(1 * time.Second), 456.789, xtime.Millisecond, nil, nil}, - {testSeries(2, "foo.qux", testTags3, 291), start.Add(2 * time.Second), 789.123, xtime.Millisecond, nil, nil}, + {testSeries(t, opts, 0, "foo.bar", testTags1, 127), start, 123.456, xtime.Millisecond, nil, nil}, + {testSeries(t, opts, 1, "foo.baz", testTags2, 150), start.Add(1 * time.Second), 456.789, xtime.Millisecond, nil, nil}, + {testSeries(t, opts, 2, "foo.qux", testTags3, 291), start.Add(2 * time.Second), 789.123, xtime.Millisecond, nil, nil}, } for i, write := range writes { @@ -1040,11 +1057,11 @@ func TestCommitLogBatchWriteDoesNotAddErroredOrSkippedSeries(t *testing.T) { defer cleanup(t, opts) commitLog := newTestCommitLog(t, opts) finalized := 0 - finalizeFn := func(_ ts.WriteBatch) { + finalizeFn := func(_ writes.WriteBatch) { finalized++ } - writes := ts.NewWriteBatch(4, ident.StringID("ns"), finalizeFn) + writes := writes.NewWriteBatch(4, ident.StringID("ns"), finalizeFn) alignedStart := time.Now().Truncate(time.Hour) for i := 0; i < 4; i++ { @@ -1053,9 +1070,9 @@ func TestCommitLogBatchWriteDoesNotAddErroredOrSkippedSeries(t *testing.T) { } writes.SetSkipWrite(0) - writes.SetOutcome(1, testSeries(1, "foo.bar", testTags1, 127), nil) - writes.SetOutcome(2, testSeries(2, "err.err", testTags2, 255), errors.New("oops")) - writes.SetOutcome(3, testSeries(3, "biz.qux", testTags3, 511), nil) + writes.SetSeries(1, testSeries(t, opts, 1, "foo.bar", testTags1, 127)) + writes.SetError(2, errors.New("oops")) + writes.SetSeries(3, testSeries(t, opts, 3, "biz.qux", testTags3, 511)) // Call write batch sync wg := sync.WaitGroup{} @@ -1095,8 +1112,8 @@ func TestCommitLogBatchWriteDoesNotAddErroredOrSkippedSeries(t *testing.T) { // Assert writes occurred by reading the commit log expected := []testWrite{ - {testSeries(1, "foo.bar", testTags1, 127), alignedStart.Add(time.Minute), 10.5, xtime.Second, nil, nil}, - {testSeries(3, "biz.qux", testTags3, 511), alignedStart.Add(time.Minute * 3), 31.5, xtime.Second, nil, nil}, + {testSeries(t, opts, 1, "foo.bar", testTags1, 127), alignedStart.Add(time.Minute), 10.5, xtime.Second, nil, nil}, + {testSeries(t, opts, 3, "biz.qux", testTags3, 511), alignedStart.Add(time.Minute * 3), 31.5, xtime.Second, nil, nil}, } assertCommitLogWritesByIterating(t, commitLog, expected) From 3d287b073f3cda65aeb51f33a7e8bc28bfdb5f1d Mon Sep 17 00:00:00 2001 From: Bo Du Date: Tue, 30 Jun 2020 21:40:01 -0400 Subject: [PATCH 29/37] Fix x/ident tests. 
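Context for the assertion change below: after this series the tests expect the pooled iterator's backingSlice to equal the empty tagsSlice{} value after Close/BlockingClose instead of being nil, presumably because the backing field is no longer a plain nilable slice. The following is only a rough, self-contained Go sketch of why the assertion has to change once the field is a struct value; tagsSlice and tagSliceIter here are hypothetical stand-ins, not the x/ident types.

    package main

    import (
        "fmt"
        "reflect"
    )

    // tagsSlice stands in for a backing container that is a struct value
    // rather than a plain slice, so it can never be nil.
    type tagsSlice struct {
        tags []string
    }

    type tagSliceIter struct {
        backingSlice tagsSlice
        currentIdx   int
    }

    // Close resets the iterator to its zero state so it can be reused.
    func (it *tagSliceIter) Close() {
        it.backingSlice = tagsSlice{} // zero value, not nil
        it.currentIdx = -1
    }

    func main() {
        it := &tagSliceIter{backingSlice: tagsSlice{tags: []string{"name1=val1"}}, currentIdx: 0}
        it.Close()
        // A nil check no longer applies; compare against the zero value instead.
        fmt.Println(reflect.DeepEqual(it.backingSlice, tagsSlice{})) // true
        fmt.Println(it.currentIdx)                                   // -1
    }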
--- src/x/ident/identifier_pool_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/x/ident/identifier_pool_test.go b/src/x/ident/identifier_pool_test.go index 42183cc7f5..f1a70fe832 100644 --- a/src/x/ident/identifier_pool_test.go +++ b/src/x/ident/identifier_pool_test.go @@ -185,7 +185,7 @@ func (s idPoolTestSuite) TestPoolGetTagsIterator() { ctx.BlockingClose() - s.Require().Nil(iter.(*tagSliceIter).backingSlice) + s.Require().Equal(tagsSlice{}, iter.(*tagSliceIter).backingSlice) s.Require().Equal(-1, iter.(*tagSliceIter).currentIdx) } @@ -206,7 +206,7 @@ func (s idPoolTestSuite) TestPoolTagsIterator() { iter.Close() - s.Require().Nil(iter.(*tagSliceIter).backingSlice) + s.Require().Equal(tagsSlice{}, iter.(*tagSliceIter).backingSlice) s.Require().Equal(-1, iter.(*tagSliceIter).currentIdx) } From c9d670df2c04fa58e0777488e59eff3047ad72e5 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Tue, 30 Jun 2020 21:50:40 -0400 Subject: [PATCH 30/37] Fix TestCommitLogBatchWriteDoesNotAddErroredOrSkippedSeries --- .../persist/fs/commitlog/commit_log_test.go | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/dbnode/persist/fs/commitlog/commit_log_test.go b/src/dbnode/persist/fs/commitlog/commit_log_test.go index e56f8d7737..ea4894aeaa 100644 --- a/src/dbnode/persist/fs/commitlog/commit_log_test.go +++ b/src/dbnode/persist/fs/commitlog/commit_log_test.go @@ -39,6 +39,7 @@ import ( "github.com/m3db/m3/src/dbnode/persist/fs" "github.com/m3db/m3/src/dbnode/ts" "github.com/m3db/m3/src/dbnode/ts/writes" + "github.com/m3db/m3/src/x/checked" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" xtime "github.com/m3db/m3/src/x/time" @@ -1040,10 +1041,12 @@ func TestCommitLogRotateLogs(t *testing.T) { } var ( + testTag0 = ident.StringTag("name0", "val0") testTag1 = ident.StringTag("name1", "val1") testTag2 = ident.StringTag("name2", "val2") testTag3 = ident.StringTag("name3", "val3") + testTags0 = ident.NewTags(testTag0) testTags1 = ident.NewTags(testTag1) testTags2 = ident.NewTags(testTag2) testTags3 = ident.NewTags(testTag3) @@ -1063,14 +1066,24 @@ func TestCommitLogBatchWriteDoesNotAddErroredOrSkippedSeries(t *testing.T) { writes := writes.NewWriteBatch(4, ident.StringID("ns"), finalizeFn) + testSeriesWrites := []ts.Series{ + testSeries(t, opts, 0, "foo.bar", testTags0, 42), + testSeries(t, opts, 1, "foo.baz", testTags1, 127), + testSeries(t, opts, 2, "biz.qaz", testTags2, 321), + testSeries(t, opts, 3, "biz.qux", testTags3, 511), + } alignedStart := time.Now().Truncate(time.Hour) for i := 0; i < 4; i++ { tt := alignedStart.Add(time.Minute * time.Duration(i)) - writes.Add(i, ident.StringID(fmt.Sprint(i)), tt, float64(i)*10.5, xtime.Second, nil) + tagsIter := opts.FilesystemOptions().TagDecoderPool().Get() + tagsIter.Reset(checked.NewBytes(testSeriesWrites[i].EncodedTags, nil)) + writes.AddTagged(i, testSeriesWrites[i].ID, tagsIter, + testSeriesWrites[i].EncodedTags, + tt, float64(i)*10.5, xtime.Second, nil) } writes.SetSkipWrite(0) - writes.SetSeries(1, testSeries(t, opts, 1, "foo.bar", testTags1, 127)) + writes.SetSeries(1, testSeries(t, opts, 1, "foo.baz", testTags1, 127)) writes.SetError(2, errors.New("oops")) writes.SetSeries(3, testSeries(t, opts, 3, "biz.qux", testTags3, 511)) @@ -1112,7 +1125,7 @@ func TestCommitLogBatchWriteDoesNotAddErroredOrSkippedSeries(t *testing.T) { // Assert writes occurred by reading the commit log expected := []testWrite{ - {testSeries(t, opts, 1, "foo.bar", testTags1, 127), 
alignedStart.Add(time.Minute), 10.5, xtime.Second, nil, nil}, + {testSeries(t, opts, 1, "foo.baz", testTags1, 127), alignedStart.Add(time.Minute), 10.5, xtime.Second, nil, nil}, {testSeries(t, opts, 3, "biz.qux", testTags3, 511), alignedStart.Add(time.Minute * 3), 31.5, xtime.Second, nil, nil}, } From e7ab5523a2ed46c8c8ad86f9b51c5de3f61cdeef Mon Sep 17 00:00:00 2001 From: Bo Du Date: Tue, 30 Jun 2020 21:51:00 -0400 Subject: [PATCH 31/37] Fix commit log big tests. --- src/dbnode/persist/fs/commitlog/commit_log_conc_test.go | 4 ++-- src/dbnode/persist/fs/commitlog/read_write_prop_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/dbnode/persist/fs/commitlog/commit_log_conc_test.go b/src/dbnode/persist/fs/commitlog/commit_log_conc_test.go index 213808714a..57677a0c41 100644 --- a/src/dbnode/persist/fs/commitlog/commit_log_conc_test.go +++ b/src/dbnode/persist/fs/commitlog/commit_log_conc_test.go @@ -61,7 +61,7 @@ func TestCommitLogActiveLogsConcurrency(t *testing.T) { time.Sleep(time.Millisecond) err := commitLog.Write( context.NewContext(), - testSeries(0, "foo.bar", testTags1, 127), + testSeries(t, opts, 0, "foo.bar", testTags1, 127), ts.Datapoint{}, xtime.Second, nil) @@ -152,7 +152,7 @@ func TestCommitLogRotateLogsConcurrency(t *testing.T) { time.Sleep(time.Millisecond) err := commitLog.Write( context.NewContext(), - testSeries(0, "foo.bar", testTags1, 127), + testSeries(t, opts, 0, "foo.bar", testTags1, 127), ts.Datapoint{}, xtime.Second, nil) diff --git a/src/dbnode/persist/fs/commitlog/read_write_prop_test.go b/src/dbnode/persist/fs/commitlog/read_write_prop_test.go index ada6737cb1..a5c8e16067 100644 --- a/src/dbnode/persist/fs/commitlog/read_write_prop_test.go +++ b/src/dbnode/persist/fs/commitlog/read_write_prop_test.go @@ -23,6 +23,7 @@ package commitlog import ( + "bytes" "errors" "fmt" "io/ioutil" @@ -120,7 +121,7 @@ func TestCommitLogReadWrite(t *testing.T) { write := seriesWrites.writes[seriesWrites.readPosition] require.Equal(t, write.series.ID.String(), series.ID.String()) - require.True(t, write.series.Tags.Equal(series.Tags)) + require.True(t, bytes.Equal(write.series.EncodedTags, series.EncodedTags)) require.Equal(t, write.series.Namespace.String(), series.Namespace.String()) require.Equal(t, write.series.Shard, series.Shard) require.Equal(t, write.datapoint.Value, datapoint.Value) @@ -623,7 +624,6 @@ func genWrite() gopter.Gen { return generatedWrite{ series: ts.Series{ ID: ident.StringID(id), - Tags: seriesTags, EncodedTags: seriesEncodedTags, Namespace: ident.StringID(ns), Shard: shard, From 876b68e8da51b4b444501a4129ef2c3582b52e0b Mon Sep 17 00:00:00 2001 From: Bo Du Date: Tue, 30 Jun 2020 22:12:24 -0400 Subject: [PATCH 32/37] Fix commit log prop test. 
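The generator in this prop test is updated to match the new ts.Series shape: the generated tags are encoded once through the tag encoder pool and the resulting bytes are stored as ts.Series.EncodedTags, while the structured tags are kept on the generated write for the fileset writer's persist.Metadata, so downstream checks can compare raw encoded bytes instead of structured tags. Roughly, the pattern looks like the self-contained sketch below; tag, encodedTags, encodeTags and series are hypothetical stand-ins for illustration only, not the m3 serialize/ts APIs.

    package main

    import (
        "bytes"
        "fmt"
    )

    // tag is a hypothetical name/value pair; encodedTags is the pre-encoded
    // wire form a series carries instead of structured tags.
    type tag struct{ name, value string }
    type encodedTags []byte

    // encodeTags is a toy encoding standing in for the real tag encoder:
    // encode once up front, then reuse the same bytes for every write and
    // every comparison instead of rebuilding tag structures per write.
    func encodeTags(tags []tag) encodedTags {
        var buf bytes.Buffer
        for _, t := range tags {
            buf.WriteString(t.name)
            buf.WriteByte('=')
            buf.WriteString(t.value)
            buf.WriteByte(',')
        }
        return encodedTags(buf.Bytes())
    }

    type series struct {
        id          string
        encodedTags encodedTags
    }

    func main() {
        enc := encodeTags([]tag{{"name1", "val1"}})
        written := series{id: "foo.bar", encodedTags: enc}
        readBack := series{id: "foo.bar", encodedTags: enc}
        // Checks compare the encoded bytes directly, mirroring the
        // bytes.Equal assertions on series.EncodedTags in the updated tests.
        fmt.Println(bytes.Equal(written.encodedTags, readBack.encodedTags)) // true
    }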
--- .../commitlog/source_prop_test.go | 39 ++++++++++++++----- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_prop_test.go b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_prop_test.go index 19c680ab8e..3a2d682723 100644 --- a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_prop_test.go +++ b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_prop_test.go @@ -229,8 +229,10 @@ func TestCommitLogSourcePropCorrectlyBootstrapsFromCommitlog(t *testing.T) { for seriesID, data := range seriesForShard { checkedBytes := checked.NewBytes(data, nil) checkedBytes.IncRef() - tags := orderedWritesBySeries[seriesID][0].series.Tags - writer.Write(ident.StringID(seriesID), tags, checkedBytes, digest.Checksum(data)) + tags := orderedWritesBySeries[seriesID][0].tags + metadata := persist.NewMetadataFromIDAndTags(ident.StringID(seriesID), tags, + persist.MetadataOptions{}) + writer.Write(metadata, checkedBytes, digest.Checksum(data)) } err = writer.Close() @@ -451,7 +453,7 @@ func TestCommitLogSourcePropCorrectlyBootstrapsFromCommitlog(t *testing.T) { return true, nil }, - genPropTestInputs(nsMeta, startTime), + genPropTestInputs(t, nsMeta, startTime), )) if !props.Run(reporter) { @@ -476,6 +478,7 @@ type generatedWrite struct { // between time.Now().Add(-bufferFuture) and time.Now().Add(bufferPast). arrivedAt time.Time series ts.Series + tags ident.Tags datapoint ts.Datapoint unit xtime.Unit annotation ts.Annotation @@ -485,7 +488,7 @@ func (w generatedWrite) String() string { return fmt.Sprintf("ID = %v, Datapoint = %+v", w.series.ID.String(), w.datapoint) } -func genPropTestInputs(nsMeta namespace.Metadata, blockStart time.Time) gopter.Gen { +func genPropTestInputs(t *testing.T, nsMeta namespace.Metadata, blockStart time.Time) gopter.Gen { curriedGenPropTestInput := func(input interface{}) gopter.Gen { var ( inputs = input.([]interface{}) @@ -500,6 +503,7 @@ func genPropTestInputs(nsMeta namespace.Metadata, blockStart time.Time) gopter.G ) return genPropTestInput( + t, blockStart, bufferPast, bufferFuture, snapshotTime, snapshotExists, commitLogExists, numDatapoints, nsMeta.ID().String(), includeCorruptedCommitlogFile, multiNodeCluster) @@ -529,6 +533,7 @@ func genPropTestInputs(nsMeta namespace.Metadata, blockStart time.Time) gopter.G } func genPropTestInput( + t *testing.T, start time.Time, bufferPast, bufferFuture time.Duration, @@ -540,7 +545,7 @@ func genPropTestInput( includeCorruptedCommitlogFile bool, multiNodeCluster bool, ) gopter.Gen { - return gen.SliceOfN(numDatapoints, genWrite(start, bufferPast, bufferFuture, ns)). + return gen.SliceOfN(numDatapoints, genWrite(t, start, bufferPast, bufferFuture, ns)). 
Map(func(val []generatedWrite) propTestInput { return propTestInput{ currentTime: start, @@ -556,7 +561,7 @@ func genPropTestInput( }) } -func genWrite(start time.Time, bufferPast, bufferFuture time.Duration, ns string) gopter.Gen { +func genWrite(t *testing.T, start time.Time, bufferPast, bufferFuture time.Duration, ns string) gopter.Gen { latestDatapointTime := start.Truncate(blockSize).Add(blockSize).Sub(start) return gopter.CombineGens( @@ -579,13 +584,16 @@ func genWrite(start time.Time, bufferPast, bufferFuture time.Duration, ns string ).Map(func(val []interface{}) generatedWrite { var ( id = val[0].(string) - t = val[1].(time.Time) - a = t + tm = val[1].(time.Time) + a = tm bufferPastOrFuture = val[2].(bool) tagKey = val[3].(string) tagVal = val[4].(string) includeTags = val[5].(bool) v = val[6].(float64) + + tagEncoderPool = testCommitlogOpts.FilesystemOptions().TagEncoderPool() + tagSliceIter = ident.NewTagsIterator(ident.Tags{}) ) if bufferPastOrFuture { @@ -594,17 +602,28 @@ func genWrite(start time.Time, bufferPast, bufferFuture time.Duration, ns string a = a.Add(bufferPast) } + tags := seriesUniqueTags(id, tagKey, tagVal, includeTags) + tagSliceIter.Reset(tags) + + tagEncoder := tagEncoderPool.Get() + err := tagEncoder.Encode(tagSliceIter) + require.NoError(t, err) + + encodedTagsChecked, ok := tagEncoder.Data() + require.True(t, ok) + return generatedWrite{ arrivedAt: a, series: ts.Series{ ID: ident.StringID(id), - Tags: seriesUniqueTags(id, tagKey, tagVal, includeTags), Namespace: ident.StringID(ns), Shard: hashIDToShard(ident.StringID(id)), UniqueIndex: seriesUniqueIndex(id), + EncodedTags: ts.EncodedTags(encodedTagsChecked.Bytes()), }, + tags: tags, datapoint: ts.Datapoint{ - Timestamp: t, + Timestamp: tm, Value: v, }, unit: xtime.Nanosecond, From fee9fa2ab0fa5a5e1e566fed04882ef08d255b29 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Tue, 30 Jun 2020 22:37:47 -0400 Subject: [PATCH 33/37] Fix dbnode/storage and dbnode/network/server/tchannelthrift/node --- .../tchannelthrift/node/service_test.go | 19 +- src/dbnode/storage/namespace_test.go | 17 +- src/dbnode/storage/shard_index_test.go | 198 ++++-------------- src/dbnode/storage/shard_ref_count_test.go | 9 + src/dbnode/storage/shard_test.go | 64 +----- 5 files changed, 76 insertions(+), 231 deletions(-) diff --git a/src/dbnode/network/server/tchannelthrift/node/service_test.go b/src/dbnode/network/server/tchannelthrift/node/service_test.go index 711a97e66d..9e43ae2561 100644 --- a/src/dbnode/network/server/tchannelthrift/node/service_test.go +++ b/src/dbnode/network/server/tchannelthrift/node/service_test.go @@ -42,6 +42,7 @@ import ( "github.com/m3db/m3/src/dbnode/topology" "github.com/m3db/m3/src/dbnode/tracepoint" "github.com/m3db/m3/src/dbnode/ts" + "github.com/m3db/m3/src/dbnode/ts/writes" "github.com/m3db/m3/src/dbnode/x/xio" "github.com/m3db/m3/src/m3ninx/idx" "github.com/m3db/m3/src/x/checked" @@ -2250,7 +2251,7 @@ func TestServiceWriteBatchRaw(t *testing.T) { {"bar", time.Now().Truncate(time.Second), 42.42}, } - writeBatch := ts.NewWriteBatch(len(values), ident.StringID(nsID), nil) + writeBatch := writes.NewWriteBatch(len(values), ident.StringID(nsID), nil) mockDB.EXPECT(). BatchWriter(ident.NewIDMatcher(nsID), len(values)). 
Return(writeBatch, nil) @@ -2304,7 +2305,7 @@ func TestServiceWriteBatchRawV2SingleNS(t *testing.T) { {"bar", time.Now().Truncate(time.Second), 42.42}, } - writeBatch := ts.NewWriteBatch(len(values), ident.StringID(nsID), nil) + writeBatch := writes.NewWriteBatch(len(values), ident.StringID(nsID), nil) mockDB.EXPECT(). BatchWriter(ident.NewIDMatcher(nsID), len(values)). Return(writeBatch, nil) @@ -2361,8 +2362,8 @@ func TestServiceWriteBatchRawV2MultiNS(t *testing.T) { {"bar", time.Now().Truncate(time.Second), 42.42}, } - writeBatch1 = ts.NewWriteBatch(len(values), ident.StringID(nsID1), nil) - writeBatch2 = ts.NewWriteBatch(len(values), ident.StringID(nsID2), nil) + writeBatch1 = writes.NewWriteBatch(len(values), ident.StringID(nsID1), nil) + writeBatch2 = writes.NewWriteBatch(len(values), ident.StringID(nsID2), nil) ) mockDB.EXPECT(). @@ -2455,7 +2456,7 @@ func TestServiceWriteBatchRawOverMaxOutstandingRequests(t *testing.T) { testIsComplete = make(chan struct{}, 0) requestIsOutstanding = make(chan struct{}, 0) ) - writeBatch := ts.NewWriteBatch(len(values), ident.StringID(nsID), nil) + writeBatch := writes.NewWriteBatch(len(values), ident.StringID(nsID), nil) mockDB.EXPECT(). BatchWriter(ident.NewIDMatcher(nsID), len(values)). Do(func(nsID ident.ID, numValues int) { @@ -2563,7 +2564,7 @@ func TestServiceWriteTaggedBatchRaw(t *testing.T) { {"bar", "c|dd", time.Now().Truncate(time.Second), 42.42}, } - writeBatch := ts.NewWriteBatch(len(values), ident.StringID(nsID), nil) + writeBatch := writes.NewWriteBatch(len(values), ident.StringID(nsID), nil) mockDB.EXPECT(). BatchWriter(ident.NewIDMatcher(nsID), len(values)). Return(writeBatch, nil) @@ -2629,7 +2630,7 @@ func TestServiceWriteTaggedBatchRawV2(t *testing.T) { {"bar", "c|dd", time.Now().Truncate(time.Second), 42.42}, } - writeBatch := ts.NewWriteBatch(len(values), ident.StringID(nsID), nil) + writeBatch := writes.NewWriteBatch(len(values), ident.StringID(nsID), nil) mockDB.EXPECT(). BatchWriter(ident.NewIDMatcher(nsID), len(values)). Return(writeBatch, nil) @@ -2696,8 +2697,8 @@ func TestServiceWriteTaggedBatchRawV2MultiNS(t *testing.T) { {"foo", "a|b", time.Now().Truncate(time.Second), 12.34}, {"bar", "c|dd", time.Now().Truncate(time.Second), 42.42}, } - writeBatch1 = ts.NewWriteBatch(len(values), ident.StringID(nsID1), nil) - writeBatch2 = ts.NewWriteBatch(len(values), ident.StringID(nsID2), nil) + writeBatch1 = writes.NewWriteBatch(len(values), ident.StringID(nsID1), nil) + writeBatch2 = writes.NewWriteBatch(len(values), ident.StringID(nsID2), nil) ) mockDB.EXPECT(). diff --git a/src/dbnode/storage/namespace_test.go b/src/dbnode/storage/namespace_test.go index 83325af8ee..6f6018d0ef 100644 --- a/src/dbnode/storage/namespace_test.go +++ b/src/dbnode/storage/namespace_test.go @@ -39,7 +39,6 @@ import ( "github.com/m3db/m3/src/dbnode/storage/repair" "github.com/m3db/m3/src/dbnode/storage/series" "github.com/m3db/m3/src/dbnode/tracepoint" - "github.com/m3db/m3/src/dbnode/ts" xmetrics "github.com/m3db/m3/src/dbnode/x/metrics" xidx "github.com/m3db/m3/src/m3ninx/idx" "github.com/m3db/m3/src/x/context" @@ -221,9 +220,9 @@ func TestNamespaceWriteShardOwned(t *testing.T) { TruncateType: truncateType, } shard.EXPECT().Write(ctx, id, now, val, unit, ant, opts). - Return(ts.Series{}, true, nil).Times(1) + Return(SeriesWrite{WasWritten: true}, nil).Times(1) shard.EXPECT().Write(ctx, id, now, val, unit, ant, opts). 
- Return(ts.Series{}, false, nil).Times(1) + Return(SeriesWrite{WasWritten: false}, nil).Times(1) ns.shards[testShardIDs[0].ID()] = shard @@ -1109,10 +1108,14 @@ func TestNamespaceIndexInsert(t *testing.T) { opts := series.WriteOptions{ TruncateType: truncateType, } - shard.EXPECT().WriteTagged(ctx, ident.NewIDMatcher("a"), ident.EmptyTagIterator, - now, 1.0, xtime.Second, nil, opts).Return(ts.Series{}, true, nil) - shard.EXPECT().WriteTagged(ctx, ident.NewIDMatcher("a"), ident.EmptyTagIterator, - now, 1.0, xtime.Second, nil, opts).Return(ts.Series{}, false, nil) + shard.EXPECT(). + WriteTagged(ctx, ident.NewIDMatcher("a"), ident.EmptyTagIterator, + now, 1.0, xtime.Second, nil, opts). + Return(SeriesWrite{WasWritten: true}, nil) + shard.EXPECT(). + WriteTagged(ctx, ident.NewIDMatcher("a"), ident.EmptyTagIterator, + now, 1.0, xtime.Second, nil, opts). + Return(SeriesWrite{WasWritten: false}, nil) ns.shards[testShardIDs[0].ID()] = shard diff --git a/src/dbnode/storage/shard_index_test.go b/src/dbnode/storage/shard_index_test.go index df28627b38..9f5087ebb5 100644 --- a/src/dbnode/storage/shard_index_test.go +++ b/src/dbnode/storage/shard_index_test.go @@ -21,9 +21,7 @@ package storage import ( - "fmt" "sync" - "sync/atomic" "testing" "time" @@ -31,9 +29,9 @@ import ( "github.com/m3db/m3/src/dbnode/runtime" "github.com/m3db/m3/src/dbnode/storage/index" "github.com/m3db/m3/src/m3ninx/doc" - xclock "github.com/m3db/m3/src/x/clock" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" + xtest "github.com/m3db/m3/src/x/test" xtime "github.com/m3db/m3/src/x/time" "github.com/fortytw2/leaktest" @@ -105,110 +103,16 @@ func TestShardInsertNamespaceIndex(t *testing.T) { require.Equal(t, []byte("value"), indexWrites[0].Fields[0].Value) } -func TestShardAsyncInsertNamespaceIndex(t *testing.T) { - defer leaktest.CheckTimeout(t, 2*time.Second)() - - opts := DefaultTestOptions() - lock := sync.RWMutex{} - indexWrites := []doc.Document{} - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - idx := NewMockNamespaceIndex(ctrl) - idx.EXPECT().WriteBatch(gomock.Any()).Do( - func(batch *index.WriteBatch) { - lock.Lock() - indexWrites = append(indexWrites, batch.PendingDocs()...) 
- lock.Unlock() - }).Return(nil).AnyTimes() - - shard := testDatabaseShardWithIndexFn(t, opts, idx, false) - shard.SetRuntimeOptions(runtime.NewOptions().SetWriteNewSeriesAsync(true)) - defer shard.Close() - - ctx := context.NewContext() - defer ctx.Close() - now := time.Now() - seriesWrite, err := shard.WriteTagged(ctx, ident.StringID("foo"), - ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))), - now, 1.0, xtime.Second, nil, series.WriteOptions{}) - assert.NoError(t, err) - assert.True(t, seriesWrite.WasWritten) - - seriesWrite, err = shard.Write(ctx, ident.StringID("bar"), now, - 1.0, xtime.Second, nil, series.WriteOptions{}) - assert.NoError(t, err) - assert.True(t, seriesWrite.WasWritten) - - seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("baz"), - ident.NewTagsIterator(ident.NewTags( - ident.StringTag("all", "tags"), - ident.StringTag("should", "be-present"), - )), - now, 1.0, xtime.Second, nil, series.WriteOptions{}) - assert.NoError(t, err) - assert.True(t, seriesWrite.WasWritten) - - for { - lock.RLock() - l := len(indexWrites) - lock.RUnlock() - if l == 2 { - break - } - time.Sleep(10 * time.Millisecond) - } - lock.Lock() - defer lock.Unlock() - - assert.Len(t, indexWrites, 2) - for _, w := range indexWrites { - if string(w.ID) == "foo" { - assert.Equal(t, 1, len(w.Fields)) - assert.Equal(t, "name", string(w.Fields[0].Name)) - assert.Equal(t, "value", string(w.Fields[0].Value)) - } else if string(w.ID) == "baz" { - assert.Equal(t, 2, len(w.Fields)) - assert.Equal(t, "all", string(w.Fields[0].Name)) - assert.Equal(t, "tags", string(w.Fields[0].Value)) - assert.Equal(t, "should", string(w.Fields[1].Name)) - assert.Equal(t, "be-present", string(w.Fields[1].Value)) - } else { - assert.Fail(t, "unexpected write", w) - } - } -} - -func TestShardAsyncIndexOnlyWhenNotIndexed(t *testing.T) { +func TestShardAsyncInsertMarkIndexedForBlockStart(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() defer leaktest.CheckTimeout(t, 2*time.Second)() - var numCalls int32 opts := DefaultTestOptions() blockSize := time.Hour now := time.Now() nextWriteTime := now.Truncate(blockSize) idx := NewMockNamespaceIndex(ctrl) - idx.EXPECT().BlockStartForWriteTime(gomock.Any()). - DoAndReturn(func(t time.Time) xtime.UnixNano { - return xtime.ToUnixNano(t.Truncate(blockSize)) - }). - AnyTimes() - idx.EXPECT().WriteBatch(gomock.Any()).Do( - func(batch *index.WriteBatch) { - if batch.Len() == 0 { - panic(fmt.Errorf("expected batch of len 1")) // panic to avoid goroutine exit from require - } - onIdx := batch.PendingEntries()[0].OnIndexSeries - onIdx.OnIndexSuccess(xtime.ToUnixNano(nextWriteTime)) // i.e. 
mark that the entry should not be indexed for an hour at least - onIdx.OnIndexFinalize(xtime.ToUnixNano(nextWriteTime)) - current := atomic.AddInt32(&numCalls, 1) - if current > 1 { - panic("only need to index when not-indexed") - } - }).Return(nil) - shard := testDatabaseShardWithIndexFn(t, opts, idx, false) shard.SetRuntimeOptions(runtime.NewOptions().SetWriteNewSeriesAsync(true)) defer shard.Close() @@ -216,51 +120,39 @@ func TestShardAsyncIndexOnlyWhenNotIndexed(t *testing.T) { ctx := context.NewContext() defer ctx.Close() + // write first time seriesWrite, err := shard.WriteTagged(ctx, ident.StringID("foo"), ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))), now, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) assert.True(t, seriesWrite.WasWritten) - - for { - if l := atomic.LoadInt32(&numCalls); l == 1 { - break + assert.True(t, seriesWrite.NeedsIndex) + + // mark as indexed + seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexSuccess(xtime.ToUnixNano(nextWriteTime)) + seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexFinalize(xtime.ToUnixNano(nextWriteTime)) + + start := time.Now() + for time.Since(start) < 10*time.Second { + entry, _, err := shard.tryRetrieveWritableSeries(ident.StringID("foo")) + require.NoError(t, err) + if entry == nil { + time.Sleep(10 * time.Millisecond) + continue } - time.Sleep(10 * time.Millisecond) + assert.True(t, entry.IndexedForBlockStart(xtime.ToUnixNano(nextWriteTime))) + break // done } - - // ensure we don't index once we have already indexed - seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("foo"), - ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))), - now.Add(time.Second), 2.0, xtime.Second, nil, series.WriteOptions{}) - assert.NoError(t, err) - assert.True(t, seriesWrite.WasWritten) - - // ensure attempting to write same point yields false and does not write - seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("foo"), - ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))), - now.Add(time.Second), 2.0, xtime.Second, nil, series.WriteOptions{}) - assert.NoError(t, err) - assert.False(t, seriesWrite.WasWritten) - - l := atomic.LoadInt32(&numCalls) - assert.Equal(t, int32(1), l) - - entry, _, err := shard.tryRetrieveWritableSeries(ident.StringID("foo")) - assert.NoError(t, err) - assert.True(t, entry.IndexedForBlockStart(xtime.ToUnixNano(nextWriteTime))) } func TestShardAsyncIndexIfExpired(t *testing.T) { defer leaktest.CheckTimeout(t, 2*time.Second)() - var numCalls int32 - // Make now not rounded exactly to the block size blockSize := time.Minute now := time.Now().Truncate(blockSize).Add(time.Second) - ctrl := gomock.NewController(t) + ctrl := xtest.NewController(t) defer ctrl.Finish() idx := NewMockNamespaceIndex(ctrl) idx.EXPECT().BlockStartForWriteTime(gomock.Any()). @@ -268,17 +160,6 @@ func TestShardAsyncIndexIfExpired(t *testing.T) { return xtime.ToUnixNano(t.Truncate(blockSize)) }). AnyTimes() - idx.EXPECT().WriteBatch(gomock.Any()). - Return(nil). - Do(func(batch *index.WriteBatch) { - for _, b := range batch.PendingEntries() { - blockStart := b.Timestamp.Truncate(blockSize) - b.OnIndexSeries.OnIndexSuccess(xtime.ToUnixNano(blockStart)) - b.OnIndexSeries.OnIndexFinalize(xtime.ToUnixNano(blockStart)) - atomic.AddInt32(&numCalls, 1) - } - }). 
- AnyTimes() opts := DefaultTestOptions() shard := testDatabaseShardWithIndexFn(t, opts, idx, false) @@ -293,33 +174,34 @@ func TestShardAsyncIndexIfExpired(t *testing.T) { now, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) assert.True(t, seriesWrite.WasWritten) + assert.True(t, seriesWrite.NeedsIndex) + + // mark as indexed + seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexSuccess(xtime.ToUnixNano(now.Truncate(blockSize))) + seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexFinalize(xtime.ToUnixNano(now.Truncate(blockSize))) + + // make sure next block not marked as indexed + start := time.Now() + for time.Since(start) < 10*time.Second { + entry, _, err := shard.tryRetrieveWritableSeries(ident.StringID("foo")) + require.NoError(t, err) + if entry == nil { + time.Sleep(10 * time.Millisecond) + continue + } + assert.True(t, entry.IndexedForBlockStart( + xtime.ToUnixNano(now.Truncate(blockSize)))) + break // done + } - // wait till we're done indexing. - indexed := xclock.WaitUntil(func() bool { - return atomic.LoadInt32(&numCalls) == 1 - }, 2*time.Second) - assert.True(t, indexed) - - // ensure we index because it's expired + // ensure we would need to index next block because it's expired nextWriteTime := now.Add(blockSize) seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("foo"), ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))), nextWriteTime, 2.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) assert.True(t, seriesWrite.WasWritten) - - // wait till we're done indexing. - reIndexed := xclock.WaitUntil(func() bool { - return atomic.LoadInt32(&numCalls) == 2 - }, 2*time.Second) - assert.True(t, reIndexed) - - entry, _, err := shard.tryRetrieveWritableSeries(ident.StringID("foo")) - assert.NoError(t, err) - - // make sure we indexed the second write - assert.True(t, entry.IndexedForBlockStart( - xtime.ToUnixNano(nextWriteTime.Truncate(blockSize)))) + assert.True(t, seriesWrite.NeedsIndex) } // TODO(prateek): wire tests above to use the field `ts` diff --git a/src/dbnode/storage/shard_ref_count_test.go b/src/dbnode/storage/shard_ref_count_test.go index ad761745f0..c320b1a56c 100644 --- a/src/dbnode/storage/shard_ref_count_test.go +++ b/src/dbnode/storage/shard_ref_count_test.go @@ -412,16 +412,25 @@ func testShardWriteTaggedAsyncRefCount(t *testing.T, idx NamespaceIndex, nowFn f ident.EmptyTagIterator, now, 1.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) assert.True(t, seriesWrite.WasWritten) + assert.True(t, seriesWrite.NeedsIndex) + seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexSuccess(idx.BlockStartForWriteTime(now)) + seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexFinalize(idx.BlockStartForWriteTime(now)) seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("bar"), ident.EmptyTagIterator, now, 2.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) assert.True(t, seriesWrite.WasWritten) + assert.True(t, seriesWrite.NeedsIndex) + seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexSuccess(idx.BlockStartForWriteTime(now)) + seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexFinalize(idx.BlockStartForWriteTime(now)) seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.EmptyTagIterator, now, 3.0, xtime.Second, nil, series.WriteOptions{}) assert.NoError(t, err) assert.True(t, seriesWrite.WasWritten) + assert.True(t, seriesWrite.NeedsIndex) + 
seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexSuccess(idx.BlockStartForWriteTime(now)) + seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexFinalize(idx.BlockStartForWriteTime(now)) inserted := xclock.WaitUntil(func() bool { counter, ok := testReporter.Counters()["dbshard.insert-queue.inserts"] diff --git a/src/dbnode/storage/shard_test.go b/src/dbnode/storage/shard_test.go index a1c4501f49..3b15b7e4b0 100644 --- a/src/dbnode/storage/shard_test.go +++ b/src/dbnode/storage/shard_test.go @@ -45,6 +45,7 @@ import ( "github.com/m3db/m3/src/dbnode/ts" xmetrics "github.com/m3db/m3/src/dbnode/x/metrics" "github.com/m3db/m3/src/dbnode/x/xio" + "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/x/checked" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" @@ -560,7 +561,7 @@ func TestShardColdFlush(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(dir) - ctrl := gomock.NewController(t) + ctrl := xtest.NewController(t) defer ctrl.Finish() now := time.Now() nowFn := func() time.Time { @@ -612,7 +613,8 @@ func TestShardColdFlush(t *testing.T) { } for _, ds := range dirtyData { curr := series.NewMockDatabaseSeries(ctrl) - curr.EXPECT().ID().Return(ds.id) + curr.EXPECT().ID().Return(ds.id).AnyTimes() + curr.EXPECT().Metadata().Return(doc.Document{ID: ds.id.Bytes()}).AnyTimes() curr.EXPECT().ColdFlushBlockStarts(gomock.Any()). Return(optimizedTimesFromTimes(ds.dirtyTimes)) shard.list.PushBack(lookup.NewEntry(curr, 0)) @@ -1649,7 +1651,7 @@ func TestShardReadEncodedCachesSeriesWithRecentlyReadPolicy(t *testing.T) { } func TestShardNewInvalidShardEntry(t *testing.T) { - ctrl := gomock.NewController(t) + ctrl := xtest.NewController(t) defer ctrl.Finish() shard := testDatabaseShard(t, DefaultTestOptions()) @@ -1658,8 +1660,7 @@ func TestShardNewInvalidShardEntry(t *testing.T) { iter := ident.NewMockTagIterator(ctrl) gomock.InOrder( iter.EXPECT().Duplicate().Return(iter), - iter.EXPECT().CurrentIndex().Return(0), - iter.EXPECT().Len().Return(0), + iter.EXPECT().Remaining().Return(8), iter.EXPECT().Next().Return(false), iter.EXPECT().Err().Return(fmt.Errorf("random err")), iter.EXPECT().Close(), @@ -1686,7 +1687,7 @@ func TestShardNewValidShardEntry(t *testing.T) { // either to retry inserting a series or to finalize the tags at the // end of a request/response cycle or from a disk retrieve cycle. func TestShardNewEntryDoesNotAlterIDOrTags(t *testing.T) { - ctrl := gomock.NewController(t) + ctrl := xtest.NewController(t) defer ctrl.Finish() shard := testDatabaseShard(t, DefaultTestOptions()) @@ -1700,7 +1701,6 @@ func TestShardNewEntryDoesNotAlterIDOrTags(t *testing.T) { // Ensure copied with call to bytes but no close call, etc id := ident.NewMockID(ctrl) - id.EXPECT().IsNoFinalize().Times(1).Return(false) id.EXPECT().Bytes().Times(1).Return(seriesID.Bytes()) iter := ident.NewMockTagIterator(ctrl) @@ -1730,56 +1730,6 @@ func TestShardNewEntryDoesNotAlterIDOrTags(t *testing.T) { assert.False(t, unsafe.Pointer(&entryIDBytes[0]) == unsafe.Pointer(&seriesIDBytes[0])) } -// TestShardNewEntryTakesRefToNoFinalizeID ensures that when an ID is -// marked as NoFinalize that newShardEntry simply takes a ref as it can -// safely be assured the ID is not pooled. 
-func TestShardNewEntryTakesRefToNoFinalizeID(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - shard := testDatabaseShard(t, DefaultTestOptions()) - defer shard.Close() - - seriesID := ident.BytesID([]byte("foo+bar=baz")) - seriesTags := ident.NewTags(ident.Tag{ - Name: ident.StringID("bar"), - Value: ident.StringID("baz"), - }) - - // Ensure copied with call to bytes but no close call, etc - id := ident.NewMockID(ctrl) - id.EXPECT().IsNoFinalize().Times(1).Return(true) - id.EXPECT().Bytes().Times(1).Return(seriesID.Bytes()) - - iter := ident.NewMockTagIterator(ctrl) - - // Ensure duplicate called but no close, etc - iter.EXPECT(). - Duplicate(). - Times(1). - Return(ident.NewTagsIterator(seriesTags)) - - entry, err := shard.newShardEntry(id, newTagsIterArg(iter)) - require.NoError(t, err) - - shard.Lock() - shard.insertNewShardEntryWithLock(entry) - shard.Unlock() - - entry, _, err = shard.tryRetrieveWritableSeries(seriesID) - require.NoError(t, err) - - assert.True(t, entry.Series.ID().Equal(seriesID)) - - entryIDBytes := entry.Series.ID().Bytes() - seriesIDBytes := seriesID.Bytes() - - // Ensure ID equal and same ref - assert.True(t, entry.Series.ID().Equal(seriesID)) - // NB(r): Use &slice[0] to get a pointer to the very first byte, i.e. data section - assert.True(t, unsafe.Pointer(&entryIDBytes[0]) == unsafe.Pointer(&seriesIDBytes[0])) -} - func TestShardIterateBatchSize(t *testing.T) { smaller := shardIterateBatchMinSize - 1 require.Equal(t, shardIterateBatchMinSize, iterateBatchSize(smaller)) From fd49a28d40e4a30169c6d13931f2bdf545dcf81d Mon Sep 17 00:00:00 2001 From: Bo Du Date: Tue, 30 Jun 2020 22:46:46 -0400 Subject: [PATCH 34/37] Fix convert fn. --- .../storage/bootstrap/bootstrapper/fs/source_index_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/source_index_test.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/source_index_test.go index afe3dd8c69..c54900d841 100644 --- a/src/dbnode/storage/bootstrap/bootstrapper/fs/source_index_test.go +++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/source_index_test.go @@ -150,7 +150,7 @@ func writeTSDBPersistedIndexBlock( require.NoError(t, err) for _, series := range block { - d, err := convert.FromMetric(series.ID(), series.Tags()) + d, err := convert.FromSeriesIDAndTags(series.ID(), series.Tags()) require.NoError(t, err) exists, err := seg.ContainsID(series.ID().Bytes()) require.NoError(t, err) From cd59420b43c0168a3d5c25293e0d71b031ea59b3 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Tue, 30 Jun 2020 22:48:12 -0400 Subject: [PATCH 35/37] Fix dbnode/storage/index/convert --- src/dbnode/storage/index/convert/convert_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/dbnode/storage/index/convert/convert_test.go b/src/dbnode/storage/index/convert/convert_test.go index 3a3ab1b043..932dc3c493 100644 --- a/src/dbnode/storage/index/convert/convert_test.go +++ b/src/dbnode/storage/index/convert/convert_test.go @@ -179,7 +179,7 @@ func TestValidateSeries(t *testing.T) { Value: ident.StringID("baz"), })) require.Error(t, err) - assert.Contains(t, err.Error(), "invalid ID") + assert.Contains(t, err.Error(), "invalid non-UTF8 ID") }) t.Run("tag name reserved", func(t *testing.T) { @@ -200,7 +200,7 @@ func TestValidateSeries(t *testing.T) { Value: ident.StringID("bar"), })) require.Error(t, err) - assert.Contains(t, err.Error(), "invalid field name") + assert.Contains(t, err.Error(), "invalid non-UTF8 field 
name") }) t.Run("tag value non-utf8", func(t *testing.T) { @@ -210,7 +210,7 @@ func TestValidateSeries(t *testing.T) { Value: ident.BinaryID(invalidBytes), })) require.Error(t, err) - assert.Contains(t, err.Error(), "invalid field value") + assert.Contains(t, err.Error(), "invalid non-UTF8 field value") }) } From 1a61f11496aad22e72ba87dcaa09823eb9ca40d2 Mon Sep 17 00:00:00 2001 From: Bo Du Date: Tue, 30 Jun 2020 22:51:17 -0400 Subject: [PATCH 36/37] Fix bootstrap fs test. --- .../storage/bootstrap/bootstrapper/fs/source_data_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go index 7bff19b3f0..ee249918f0 100644 --- a/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go +++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go @@ -32,6 +32,7 @@ import ( "github.com/m3db/m3/src/dbnode/digest" "github.com/m3db/m3/src/dbnode/namespace" + "github.com/m3db/m3/src/dbnode/persist" "github.com/m3db/m3/src/dbnode/persist/fs" "github.com/m3db/m3/src/dbnode/retention" "github.com/m3db/m3/src/dbnode/storage/bootstrap" @@ -260,8 +261,9 @@ func writeTSDBFiles( for _, v := range series { bytes := checked.NewBytes(v.data, nil) bytes.IncRef() - require.NoError(t, w.Write(ident.StringID(v.id), - sortedTagsFromTagsMap(v.tags), bytes, digest.Checksum(bytes.Bytes()))) + metadata := persist.NewMetadataFromIDAndTags(ident.StringID(v.id), sortedTagsFromTagsMap(v.tags), + persist.MetadataOptions{}) + require.NoError(t, w.Write(metadata, bytes, digest.Checksum(bytes.Bytes()))) bytes.DecRef() } From fe4b2f91f059f0183e1adb6860262d921a0cc1c8 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Tue, 30 Jun 2020 22:52:47 -0400 Subject: [PATCH 37/37] Fix nit --- src/dbnode/storage/shard.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/dbnode/storage/shard.go b/src/dbnode/storage/shard.go index c4ecb427bb..1df1b562f1 100644 --- a/src/dbnode/storage/shard.go +++ b/src/dbnode/storage/shard.go @@ -939,7 +939,7 @@ func (s *dbShard) writeAndIndex( if err == nil && shouldReverseIndex { if entry.NeedsIndexUpdate(s.reverseIndex.BlockStartForWriteTime(timestamp)) { if !opts.writeNewSeriesAsync { - return SeriesWrite{}, fmt.Errorf("to index async need write new series to be enable") + return SeriesWrite{}, fmt.Errorf("to index async need write new series to be enabled") } needsIndex = true pendingIndexInsert = s.pendingIndexInsert(entry, timestamp) @@ -978,7 +978,7 @@ func (s *dbShard) writeAndIndex( if shouldReverseIndex { if !opts.writeNewSeriesAsync { - return SeriesWrite{}, fmt.Errorf("to index async need write new series to be enable") + return SeriesWrite{}, fmt.Errorf("to index async need write new series to be enabled") } needsIndex = true pendingIndexInsert = s.pendingIndexInsert(result.entry, timestamp)