diff --git a/src/cmd/services/m3coordinator/downsample/id_pool_types.go b/src/cmd/services/m3coordinator/downsample/id_pool_types.go index 46303a9e93..2d5981111a 100644 --- a/src/cmd/services/m3coordinator/downsample/id_pool_types.go +++ b/src/cmd/services/m3coordinator/downsample/id_pool_types.go @@ -76,8 +76,8 @@ type rollupIDProvider struct { nameTag ident.ID nameTagBytes []byte nameTagBeforeRollupTag bool - tagNameID *ident.ReuseableBytesID - tagValueID *ident.ReuseableBytesID + tagNameID *ident.ReusableBytesID + tagValueID *ident.ReusableBytesID } func newRollupIDProvider( @@ -93,8 +93,8 @@ func newRollupIDProvider( nameTag: nameTag, nameTagBytes: nameTagBytes, nameTagBeforeRollupTag: nameTagBeforeRollupTag, - tagNameID: ident.NewReuseableBytesID(), - tagValueID: ident.NewReuseableBytesID(), + tagNameID: ident.NewReusableBytesID(), + tagValueID: ident.NewReusableBytesID(), } } diff --git a/src/cmd/services/m3coordinator/downsample/tags.go b/src/cmd/services/m3coordinator/downsample/tags.go index 2c5985cb0e..35bfc70dc5 100644 --- a/src/cmd/services/m3coordinator/downsample/tags.go +++ b/src/cmd/services/m3coordinator/downsample/tags.go @@ -33,13 +33,13 @@ const ( ) type tags struct { - names [][]byte - values [][]byte - idx int - nameBuf []byte - valueBuf []byte - reuseableTagName *ident.ReuseableBytesID - reuseableTagValue *ident.ReuseableBytesID + names [][]byte + values [][]byte + idx int + nameBuf []byte + valueBuf []byte + reusableTagName *ident.ReusableBytesID + reusableTagValue *ident.ReusableBytesID } // Ensure tags implements TagIterator and sort Interface @@ -50,11 +50,11 @@ var ( func newTags() *tags { return &tags{ - names: make([][]byte, 0, initAllocTagsSliceCapacity), - values: make([][]byte, 0, initAllocTagsSliceCapacity), - idx: -1, - reuseableTagName: ident.NewReuseableBytesID(), - reuseableTagValue: ident.NewReuseableBytesID(), + names: make([][]byte, 0, initAllocTagsSliceCapacity), + values: make([][]byte, 0, initAllocTagsSliceCapacity), + 
idx: -1, + reusableTagName: ident.NewReusableBytesID(), + reusableTagValue: ident.NewReusableBytesID(), } } @@ -131,11 +131,11 @@ func (t *tags) CurrentIndex() int { func (t *tags) Current() ident.Tag { t.nameBuf = append(t.nameBuf[:0], t.names[t.idx]...) t.valueBuf = append(t.valueBuf[:0], t.values[t.idx]...) - t.reuseableTagName.Reset(t.nameBuf) - t.reuseableTagValue.Reset(t.valueBuf) + t.reusableTagName.Reset(t.nameBuf) + t.reusableTagValue.Reset(t.valueBuf) return ident.Tag{ - Name: t.reuseableTagName, - Value: t.reuseableTagValue, + Name: t.reusableTagName, + Value: t.reusableTagValue, } } diff --git a/src/dbnode/encoding/iterators.go b/src/dbnode/encoding/iterators.go index 7377794e2b..a1359a51b1 100644 --- a/src/dbnode/encoding/iterators.go +++ b/src/dbnode/encoding/iterators.go @@ -89,7 +89,7 @@ func (i *iterators) current() (ts.Datapoint, xtime.Unit, ts.Annotation) { return freqA < freqB }) - // Reset reuseable value frequencies + // Reset reusable value frequencies for key := range i.valueFrequencies { delete(i.valueFrequencies, key) } diff --git a/src/dbnode/persist/fs/commitlog/reader.go b/src/dbnode/persist/fs/commitlog/reader.go index 4e2fe77938..c8a6acea1d 100644 --- a/src/dbnode/persist/fs/commitlog/reader.go +++ b/src/dbnode/persist/fs/commitlog/reader.go @@ -84,7 +84,7 @@ type reader struct { metadataLookup map[uint64]ts.Series namespacesRead []namespaceRead - seriesIDReused *ident.ReuseableBytesID + seriesIDReused *ident.ReusableBytesID } type namespaceRead struct { @@ -111,7 +111,7 @@ func newCommitLogReader(opts commitLogReaderOptions) commitLogReader { chunkReader: newChunkReader(opts.commitLogOptions.FlushSize()), infoDecoder: msgpack.NewDecoder(opts.commitLogOptions.FilesystemOptions().DecodingOptions()), infoDecoderStream: msgpack.NewByteDecoderStream(nil), - seriesIDReused: ident.NewReuseableBytesID(), + seriesIDReused: ident.NewReusableBytesID(), } } diff --git a/src/dbnode/persist/fs/retriever.go b/src/dbnode/persist/fs/retriever.go 
index 028e3bf3dc..256eca347c 100644 --- a/src/dbnode/persist/fs/retriever.go +++ b/src/dbnode/persist/fs/retriever.go @@ -200,7 +200,7 @@ func (r *blockRetriever) AssignShardSet(shardSet sharding.ShardSet) { func (r *blockRetriever) fetchLoop(seekerMgr DataFileSetSeekerManager) { var ( seekerResources = NewReusableSeekerResources(r.fsOpts) - retrieverResources = newReuseableRetrieverResources() + retrieverResources = newReusableRetrieverResources() inFlight []*retrieveRequest currBatchReqs []*retrieveRequest ) @@ -302,7 +302,7 @@ func (r *blockRetriever) filterAndCompleteWideReqs( reqs []*retrieveRequest, seeker ConcurrentDataFileSetSeeker, seekerResources ReusableSeekerResources, - retrieverResources *reuseableRetrieverResources, + retrieverResources *reusableRetrieverResources, ) []*retrieveRequest { retrieverResources.resetDataReqs() retrieverResources.resetWideEntryReqs() @@ -367,7 +367,7 @@ func (r *blockRetriever) fetchBatch( blockStart time.Time, allReqs []*retrieveRequest, seekerResources ReusableSeekerResources, - retrieverResources *reuseableRetrieverResources, + retrieverResources *reusableRetrieverResources, ) { var ( seeker ConcurrentDataFileSetSeeker @@ -1045,35 +1045,35 @@ func (p *reqPool) Put(req *retrieveRequest) { p.pool.Put(req) } -type reuseableRetrieverResources struct { +type reusableRetrieverResources struct { dataReqs []*retrieveRequest wideEntryReqs []*retrieveRequest } -func newReuseableRetrieverResources() *reuseableRetrieverResources { - return &reuseableRetrieverResources{} +func newReusableRetrieverResources() *reusableRetrieverResources { + return &reusableRetrieverResources{} } -func (r *reuseableRetrieverResources) resetAll() { +func (r *reusableRetrieverResources) resetAll() { r.resetDataReqs() r.resetWideEntryReqs() } -func (r *reuseableRetrieverResources) resetDataReqs() { +func (r *reusableRetrieverResources) resetDataReqs() { for i := range r.dataReqs { r.dataReqs[i] = nil } r.dataReqs = r.dataReqs[:0] } -func (r 
*reuseableRetrieverResources) resetWideEntryReqs() { +func (r *reusableRetrieverResources) resetWideEntryReqs() { for i := range r.wideEntryReqs { r.wideEntryReqs[i] = nil } r.wideEntryReqs = r.wideEntryReqs[:0] } -func (r *reuseableRetrieverResources) appendWideEntryReq( +func (r *reusableRetrieverResources) appendWideEntryReq( req *retrieveRequest, ) { r.wideEntryReqs = append(r.wideEntryReqs, req) diff --git a/src/dbnode/persist/fs/write.go b/src/dbnode/persist/fs/write.go index 939609547c..0f9d46f1ec 100644 --- a/src/dbnode/persist/fs/write.go +++ b/src/dbnode/persist/fs/write.go @@ -459,10 +459,10 @@ func (w *writer) writeIndexFileContents( sort.Sort(w.indexEntries) var ( - offset int64 - prevID []byte - tagsReuseable = w.tagsIterator - tagsEncoder = w.tagEncoderPool.Get() + offset int64 + prevID []byte + tagsReusable = w.tagsIterator + tagsEncoder = w.tagEncoderPool.Get() ) defer tagsEncoder.Finalize() for i, entry := range w.indexEntries { @@ -474,7 +474,7 @@ func (w *writer) writeIndexFileContents( return fmt.Errorf("encountered duplicate ID: %s", id) } - tagsIter, err := metadata.ResetOrReturnProvidedTagIterator(tagsReuseable) + tagsIter, err := metadata.ResetOrReturnProvidedTagIterator(tagsReusable) if err != nil { return err } diff --git a/src/dbnode/persist/types.go b/src/dbnode/persist/types.go index c01b4aa8ca..4b4b195b53 100644 --- a/src/dbnode/persist/types.go +++ b/src/dbnode/persist/types.go @@ -35,7 +35,7 @@ import ( "github.com/pborman/uuid" ) -var errReuseableTagIteratorRequired = errors.New("reuseable tags iterator is required") +var errReusableTagIteratorRequired = errors.New("reusable tags iterator is required") // Metadata is metadata for a time series, it can // have several underlying sources. 
@@ -101,30 +101,30 @@ func (m Metadata) BytesID() []byte { // ResetOrReturnProvidedTagIterator returns a tag iterator // for the series, returning a direct ref to a provided tag -// iterator or using the reuseable tag iterator provided by the +// iterator or using the reusable tag iterator provided by the // callsite if it needs to iterate over tags or fields. func (m Metadata) ResetOrReturnProvidedTagIterator( - reuseableTagsIterator ident.TagsIterator, + reusableTagsIterator ident.TagsIterator, ) (ident.TagIterator, error) { - if reuseableTagsIterator == nil { + if reusableTagsIterator == nil { // Always check to make sure callsites won't // get a bad allocation pattern of having // to create one here inline if the metadata // they are passing in suddenly changes from // tagsIter to tags or fields with metadata. - return nil, errReuseableTagIteratorRequired + return nil, errReusableTagIteratorRequired } if m.tagsIter != nil { return m.tagsIter, nil } if len(m.tags.Values()) > 0 { - reuseableTagsIterator.Reset(m.tags) - return reuseableTagsIterator, reuseableTagsIterator.Err() + reusableTagsIterator.Reset(m.tags) + return reusableTagsIterator, reusableTagsIterator.Err() } - reuseableTagsIterator.ResetFields(m.metadata.Fields) - return reuseableTagsIterator, reuseableTagsIterator.Err() + reusableTagsIterator.ResetFields(m.metadata.Fields) + return reusableTagsIterator, reusableTagsIterator.Err() } // Finalize will finalize any resources that requested diff --git a/src/dbnode/storage/cluster/database.go b/src/dbnode/storage/cluster/database.go index 75a77b7d6d..280d0e498c 100644 --- a/src/dbnode/storage/cluster/database.go +++ b/src/dbnode/storage/cluster/database.go @@ -357,9 +357,9 @@ func (d *clusterDB) analyzeAndReportShardStates() { defer reportStats() - // Manage the reuseable vars - d.resetReuseable() - defer d.resetReuseable() + // Manage the reusable vars + d.resetReusable() + defer d.resetReusable() for _, s := range entry.ShardSet().All() { if s.State() 
== shard.Initializing {
@@ -441,7 +441,7 @@ func (d *clusterDB) analyzeAndReportShardStates() {
 			zap.Uint32s("shards", markAvailable))
 	}
 
-func (d *clusterDB) resetReuseable() {
+func (d *clusterDB) resetReusable() {
 	d.resetInitializing()
 	d.resetBootstrapCount()
 }
diff --git a/src/dbnode/storage/fs_merge_with_mem.go b/src/dbnode/storage/fs_merge_with_mem.go
index ab409c1d17..e040df6ae4 100644
--- a/src/dbnode/storage/fs_merge_with_mem.go
+++ b/src/dbnode/storage/fs_merge_with_mem.go
@@ -42,7 +42,7 @@ type fsMergeWithMem struct {
 	retriever          series.QueryableBlockRetriever
 	dirtySeries        *dirtySeriesMap
 	dirtySeriesToWrite map[xtime.UnixNano]*idList
-	reuseableID        *ident.ReuseableBytesID
+	reusableID         *ident.ReusableBytesID
 }
 
 func newFSMergeWithMem(
@@ -56,7 +56,7 @@ func newFSMergeWithMem(
 		retriever:          retriever,
 		dirtySeries:        dirtySeries,
 		dirtySeriesToWrite: dirtySeriesToWrite,
-		reuseableID:        ident.NewReuseableBytesID(),
+		reusableID:         ident.NewReusableBytesID(),
 	}
 }
 
@@ -123,13 +123,13 @@ func (m *fsMergeWithMem) ForEachRemaining(
 	fn fs.ForEachRemainingFn,
 	nsCtx namespace.Context,
 ) error {
-	reuseableID := m.reuseableID
+	reusableID := m.reusableID
 	seriesList := m.dirtySeriesToWrite[blockStart]
 
 	for seriesElement := seriesList.Front(); seriesElement != nil; seriesElement = seriesElement.Next() {
 		seriesMetadata := seriesElement.Value
-		reuseableID.Reset(seriesMetadata.ID)
-		mergeWithData, hasData, err := m.fetchBlocks(ctx, reuseableID, blockStart, nsCtx)
+		reusableID.Reset(seriesMetadata.ID)
+		mergeWithData, hasData, err := m.fetchBlocks(ctx, reusableID, blockStart, nsCtx)
 		if err != nil {
 			return err
 		}
diff --git a/src/dbnode/storage/namespace.go b/src/dbnode/storage/namespace.go
index e41e2dcb1f..6a45b1cf71 100644
--- a/src/dbnode/storage/namespace.go
+++ b/src/dbnode/storage/namespace.go
@@ -1187,7 +1187,7 @@ type idAndBlockStart struct {
 	blockStart xtime.UnixNano
 }
 
-type coldFlushReuseableResources struct {
+type coldFlushReusableResources struct {
 	// dirtySeries is a map from a composite key of <series ID, block start>
 	// to an element in a list in the dirtySeriesToWrite map. This map is used
 	// to quickly test whether a series is dirty for a particular block start.
@@ -1209,22 +1209,22 @@ type coldFlushReuseableResources struct {
 	fsReader fs.DataFileSetReader
}
 
-func newColdFlushReuseableResources(opts Options) (coldFlushReuseableResources, error) {
+func newColdFlushReusableResources(opts Options) (coldFlushReusableResources, error) {
 	fsReader, err := fs.NewReader(opts.BytesPool(), opts.CommitLogOptions().FilesystemOptions())
 	if err != nil {
-		return coldFlushReuseableResources{}, nil
+		return coldFlushReusableResources{}, err
 	}
 
-	return coldFlushReuseableResources{
+	return coldFlushReusableResources{
 		dirtySeries:        newDirtySeriesMap(),
 		dirtySeriesToWrite: make(map[xtime.UnixNano]*idList),
 		// TODO(juchan): set pool options.
 		idElementPool: newIDElementPool(nil),
 		fsReader:      fsReader,
 	}, nil
 }
 
-func (r *coldFlushReuseableResources) reset() {
+func (r *coldFlushReusableResources) reset() {
 	for _, seriesList := range r.dirtySeriesToWrite {
 		if seriesList != nil {
 			seriesList.Reset()
@@ -1259,11 +1259,11 @@ func (n *dbNamespace) ColdFlush(flushPersist persist.FlushPreparer) error {
 	}
 	shards := n.OwnedShards()
 
-	resources, err := newColdFlushReuseableResources(n.opts)
+	resources, err := newColdFlushReusableResources(n.opts)
 	if err != nil {
 		n.metrics.flushColdData.ReportError(n.nowFn().Sub(callStart))
 		return err
 	}
 
 	// NB(bodu): The in-mem index will lag behind the TSDB in terms of new series writes. For a period of
 	// time between when we rotate out the active cold mutable index segments (happens here) and when
diff --git a/src/dbnode/storage/shard.go b/src/dbnode/storage/shard.go
index 26d2a6bae3..591686e98f 100644
--- a/src/dbnode/storage/shard.go
+++ b/src/dbnode/storage/shard.go
@@ -2287,7 +2287,7 @@ func (s *dbShard) WarmFlush(
 
 func (s *dbShard) ColdFlush(
 	flushPreparer persist.FlushPreparer,
-	resources coldFlushReuseableResources,
+	resources coldFlushReusableResources,
 	nsCtx namespace.Context,
 	onFlushSeries persist.OnFlushSeries,
 ) (ShardColdFlush, error) {
diff --git a/src/dbnode/storage/shard_test.go b/src/dbnode/storage/shard_test.go
index cb22865d67..6e4e6a989f 100644
--- a/src/dbnode/storage/shard_test.go
+++ b/src/dbnode/storage/shard_test.go
@@ -638,7 +638,7 @@ func TestShardColdFlush(t *testing.T) {
 	preparer := persist.NewMockFlushPreparer(ctrl)
 	fsReader := fs.NewMockDataFileSetReader(ctrl)
 
-	resources := coldFlushReuseableResources{
+	resources := coldFlushReusableResources{
 		dirtySeries:        newDirtySeriesMap(),
 		dirtySeriesToWrite: make(map[xtime.UnixNano]*idList),
 		idElementPool:      newIDElementPool(nil),
@@ -711,7 +711,7 @@ func TestShardColdFlushNoMergeIfNothingDirty(t *testing.T) {
 	dirtySeriesToWrite[xtime.ToUnixNano(t2)] = newIDList(idElementPool)
 	dirtySeriesToWrite[xtime.ToUnixNano(t3)] = newIDList(idElementPool)
 
-	resources := coldFlushReuseableResources{
+	resources := coldFlushReusableResources{
 		dirtySeries:        newDirtySeriesMap(),
 		dirtySeriesToWrite: dirtySeriesToWrite,
 		idElementPool:      idElementPool,
diff --git a/src/dbnode/storage/storage_mock.go b/src/dbnode/storage/storage_mock.go
index 8871f8c249..bf9c30d220 100644
--- a/src/dbnode/storage/storage_mock.go
+++ b/src/dbnode/storage/storage_mock.go
@@ -2169,7 +2169,7 @@ func (mr *MockdatabaseShardMockRecorder) WarmFlush(blockStart, flush, nsCtx inte
 }
 
 // ColdFlush mocks base method
-func (m *MockdatabaseShard) ColdFlush(flush persist.FlushPreparer, resources
coldFlushReuseableResources, nsCtx namespace.Context, onFlush persist.OnFlushSeries) (ShardColdFlush, error) { +func (m *MockdatabaseShard) ColdFlush(flush persist.FlushPreparer, resources coldFlushReusableResources, nsCtx namespace.Context, onFlush persist.OnFlushSeries) (ShardColdFlush, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ColdFlush", flush, resources, nsCtx, onFlush) ret0, _ := ret[0].(ShardColdFlush) diff --git a/src/dbnode/storage/types.go b/src/dbnode/storage/types.go index 0ac72ebb6b..20705b0abe 100644 --- a/src/dbnode/storage/types.go +++ b/src/dbnode/storage/types.go @@ -601,7 +601,7 @@ type databaseShard interface { // ColdFlush flushes the unflushed ColdWrites in this shard. ColdFlush( flush persist.FlushPreparer, - resources coldFlushReuseableResources, + resources coldFlushReusableResources, nsCtx namespace.Context, onFlush persist.OnFlushSeries, ) (ShardColdFlush, error) diff --git a/src/x/ident/bytes_id.go b/src/x/ident/bytes_id.go index 3f7990266c..ed23475307 100644 --- a/src/x/ident/bytes_id.go +++ b/src/x/ident/bytes_id.go @@ -59,55 +59,55 @@ func (v BytesID) IsNoFinalize() bool { func (v BytesID) Finalize() { } -var _ ID = (*ReuseableBytesID)(nil) +var _ ID = (*ReusableBytesID)(nil) -// ReuseableBytesID is a reuseable bytes ID, use with extreme care in +// ReusableBytesID is a reusable bytes ID, use with extreme care in // places where the lifecycle is known (there is no checking with this // ID). -type ReuseableBytesID struct { +type ReusableBytesID struct { bytes []byte } -// NewReuseableBytesID returns a new reuseable bytes ID, use with extreme +// NewReusableBytesID returns a new reusable bytes ID, use with extreme // care in places where the lifecycle is known (there is no checking with // this ID). 
-func NewReuseableBytesID() *ReuseableBytesID {
-	return &ReuseableBytesID{}
+func NewReusableBytesID() *ReusableBytesID {
+	return &ReusableBytesID{}
 }
 
 // Reset resets the bytes ID for reuse, make sure there are zero references
 // to this ID from any other data structure at this point.
-func (i *ReuseableBytesID) Reset(bytes []byte) {
+func (i *ReusableBytesID) Reset(bytes []byte) {
 	i.bytes = bytes
 }
 
 // Bytes implements ID.
-func (i *ReuseableBytesID) Bytes() []byte {
+func (i *ReusableBytesID) Bytes() []byte {
 	return i.bytes
 }
 
 // Equal implements ID.
-func (i *ReuseableBytesID) Equal(value ID) bool {
+func (i *ReusableBytesID) Equal(value ID) bool {
 	return bytes.Equal(i.bytes, value.Bytes())
 }
 
 // NoFinalize implements ID.
-func (i *ReuseableBytesID) NoFinalize() {
+func (i *ReusableBytesID) NoFinalize() {
 }
 
 // IsNoFinalize implements ID.
-func (i *ReuseableBytesID) IsNoFinalize() bool {
-	// Reuseable bytes ID are always not able to not be finalized
+func (i *ReusableBytesID) IsNoFinalize() bool {
+	// Reusable bytes IDs are never marked as no-finalize
 	// as this ID is reused with reset.
 	return false
}
 
 // Finalize implements ID.
-func (i *ReuseableBytesID) Finalize() {
+func (i *ReusableBytesID) Finalize() {
 	// Noop as it will be re-used.
 }
 
 // String returns the bytes ID as a string.
-func (i *ReuseableBytesID) String() string { +func (i *ReusableBytesID) String() string { return string(i.bytes) } diff --git a/src/x/ident/tag_iterator.go b/src/x/ident/tag_iterator.go index f51508b0c3..bded039e7b 100644 --- a/src/x/ident/tag_iterator.go +++ b/src/x/ident/tag_iterator.go @@ -68,11 +68,11 @@ func newTagSliceIter( pool Pool, ) *tagSliceIter { iter := &tagSliceIter{ - nameBytesID: NewReuseableBytesID(), - valueBytesID: NewReuseableBytesID(), + nameBytesID: NewReusableBytesID(), + valueBytesID: NewReusableBytesID(), pool: pool, } - iter.currentReuseableTag = Tag{ + iter.currentReusableTag = Tag{ Name: iter.nameBytesID, Value: iter.valueBytesID, } @@ -97,13 +97,13 @@ type tagsSlice struct { } type tagSliceIter struct { - backingSlice tagsSlice - currentIdx int - currentTag Tag - currentReuseableTag Tag - nameBytesID *ReuseableBytesID - valueBytesID *ReuseableBytesID - pool Pool + backingSlice tagsSlice + currentIdx int + currentTag Tag + currentReusableTag Tag + nameBytesID *ReusableBytesID + valueBytesID *ReusableBytesID + pool Pool } func (i *tagSliceIter) Next() bool { @@ -115,7 +115,7 @@ func (i *tagSliceIter) Next() bool { } else { i.nameBytesID.Reset(i.backingSlice.fields[i.currentIdx].Name) i.valueBytesID.Reset(i.backingSlice.fields[i.currentIdx].Value) - i.currentTag = i.currentReuseableTag + i.currentTag = i.currentReusableTag } return true }