[lint] Update instances of Reuseable -> Reusable (#2950)
arnikola authored Nov 27, 2020
Parent: 400ed87 · Commit: dc6eadf
Showing 16 changed files with 94 additions and 98 deletions.
src/cmd/services/m3coordinator/downsample/id_pool_types.go (4 additions, 4 deletions)
@@ -76,8 +76,8 @@ type rollupIDProvider struct {
 	nameTag                ident.ID
 	nameTagBytes           []byte
 	nameTagBeforeRollupTag bool
-	tagNameID              *ident.ReuseableBytesID
-	tagValueID             *ident.ReuseableBytesID
+	tagNameID              *ident.ReusableBytesID
+	tagValueID             *ident.ReusableBytesID
 }
 
 func newRollupIDProvider(
@@ -93,8 +93,8 @@ func newRollupIDProvider(
 		nameTag:                nameTag,
 		nameTagBytes:           nameTagBytes,
 		nameTagBeforeRollupTag: nameTagBeforeRollupTag,
-		tagNameID:              ident.NewReuseableBytesID(),
-		tagValueID:             ident.NewReuseableBytesID(),
+		tagNameID:              ident.NewReusableBytesID(),
+		tagValueID:             ident.NewReusableBytesID(),
 	}
 }
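
For readers skimming the rename: ident.ReusableBytesID is an ident.ID implementation whose underlying bytes can be swapped in place with Reset, so one ID value can stand in for many raw byte IDs without a per-use allocation. A minimal sketch of that pattern, assuming only the NewReusableBytesID, Reset, and String calls that appear in these diffs (the series names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/x/ident"
)

func main() {
	// One reusable ID serves many raw byte slices: Reset repoints the
	// ID at new bytes instead of allocating a fresh ident.ID each time.
	id := ident.NewReusableBytesID()
	for _, raw := range [][]byte{[]byte("series-a"), []byte("series-b")} {
		id.Reset(raw)
		fmt.Println(id.String())
	}
}
```
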
src/cmd/services/m3coordinator/downsample/tags.go (16 additions, 16 deletions)
@@ -33,13 +33,13 @@ const (
 )
 
 type tags struct {
-	names             [][]byte
-	values            [][]byte
-	idx               int
-	nameBuf           []byte
-	valueBuf          []byte
-	reuseableTagName  *ident.ReuseableBytesID
-	reuseableTagValue *ident.ReuseableBytesID
+	names            [][]byte
+	values           [][]byte
+	idx              int
+	nameBuf          []byte
+	valueBuf         []byte
+	reusableTagName  *ident.ReusableBytesID
+	reusableTagValue *ident.ReusableBytesID
 }
 
 // Ensure tags implements TagIterator and sort Interface
@@ -50,11 +50,11 @@ var (
 
 func newTags() *tags {
 	return &tags{
-		names:             make([][]byte, 0, initAllocTagsSliceCapacity),
-		values:            make([][]byte, 0, initAllocTagsSliceCapacity),
-		idx:               -1,
-		reuseableTagName:  ident.NewReuseableBytesID(),
-		reuseableTagValue: ident.NewReuseableBytesID(),
+		names:            make([][]byte, 0, initAllocTagsSliceCapacity),
+		values:           make([][]byte, 0, initAllocTagsSliceCapacity),
+		idx:              -1,
+		reusableTagName:  ident.NewReusableBytesID(),
+		reusableTagValue: ident.NewReusableBytesID(),
 	}
 }
 
@@ -131,11 +131,11 @@ func (t *tags) CurrentIndex() int {
 func (t *tags) Current() ident.Tag {
 	t.nameBuf = append(t.nameBuf[:0], t.names[t.idx]...)
 	t.valueBuf = append(t.valueBuf[:0], t.values[t.idx]...)
-	t.reuseableTagName.Reset(t.nameBuf)
-	t.reuseableTagValue.Reset(t.valueBuf)
+	t.reusableTagName.Reset(t.nameBuf)
+	t.reusableTagValue.Reset(t.valueBuf)
 	return ident.Tag{
-		Name:  t.reuseableTagName,
-		Value: t.reuseableTagValue,
+		Name:  t.reusableTagName,
+		Value: t.reusableTagValue,
 	}
 }
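
Note the contract Current() keeps through the rename: each call copies the current name and value into the iterator's scratch buffers and resets the two reusable IDs over those copies, so the returned ident.Tag is only valid until the next Next()/Current() call. A hedged sketch of the standard consumption loop, assuming the NewTags/StringTag/NewTagsIterator helpers from m3's x/ident package (printTags is illustrative):

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/x/ident"
)

// printTags walks any ident.TagIterator; the Tag from Current aliases the
// iterator's reusable buffers, so copy the bytes if you need to keep them.
func printTags(it ident.TagIterator) error {
	for it.Next() {
		tag := it.Current()
		fmt.Printf("%s=%s\n", tag.Name.String(), tag.Value.String())
	}
	return it.Err()
}

func main() {
	tags := ident.NewTags(ident.StringTag("env", "prod"))
	if err := printTags(ident.NewTagsIterator(tags)); err != nil {
		fmt.Println("iterate:", err)
	}
}
```
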
src/dbnode/encoding/iterators.go (1 addition, 1 deletion)
@@ -89,7 +89,7 @@ func (i *iterators) current() (ts.Datapoint, xtime.Unit, ts.Annotation) {
 		return freqA < freqB
 	})
 
-	// Reset reuseable value frequencies
+	// Reset reusable value frequencies
 	for key := range i.valueFrequencies {
 		delete(i.valueFrequencies, key)
 	}

src/dbnode/persist/fs/commitlog/reader.go (2 additions, 2 deletions)
@@ -84,7 +84,7 @@ type reader struct {
 
 	metadataLookup map[uint64]ts.Series
 	namespacesRead []namespaceRead
-	seriesIDReused *ident.ReuseableBytesID
+	seriesIDReused *ident.ReusableBytesID
 }
 
 type namespaceRead struct {
@@ -111,7 +111,7 @@ func newCommitLogReader(opts commitLogReaderOptions) commitLogReader {
 		chunkReader:       newChunkReader(opts.commitLogOptions.FlushSize()),
 		infoDecoder:       msgpack.NewDecoder(opts.commitLogOptions.FilesystemOptions().DecodingOptions()),
 		infoDecoderStream: msgpack.NewByteDecoderStream(nil),
-		seriesIDReused:    ident.NewReuseableBytesID(),
+		seriesIDReused:    ident.NewReusableBytesID(),
 	}
 }

src/dbnode/persist/fs/retriever.go (10 additions, 10 deletions)
@@ -200,7 +200,7 @@ func (r *blockRetriever) AssignShardSet(shardSet sharding.ShardSet) {
 func (r *blockRetriever) fetchLoop(seekerMgr DataFileSetSeekerManager) {
 	var (
 		seekerResources    = NewReusableSeekerResources(r.fsOpts)
-		retrieverResources = newReuseableRetrieverResources()
+		retrieverResources = newReusableRetrieverResources()
 		inFlight           []*retrieveRequest
 		currBatchReqs      []*retrieveRequest
 	)
@@ -302,7 +302,7 @@ func (r *blockRetriever) filterAndCompleteWideReqs(
 	reqs []*retrieveRequest,
 	seeker ConcurrentDataFileSetSeeker,
 	seekerResources ReusableSeekerResources,
-	retrieverResources *reuseableRetrieverResources,
+	retrieverResources *reusableRetrieverResources,
 ) []*retrieveRequest {
 	retrieverResources.resetDataReqs()
 	retrieverResources.resetWideEntryReqs()
@@ -367,7 +367,7 @@ func (r *blockRetriever) fetchBatch(
 	blockStart time.Time,
 	allReqs []*retrieveRequest,
 	seekerResources ReusableSeekerResources,
-	retrieverResources *reuseableRetrieverResources,
+	retrieverResources *reusableRetrieverResources,
 ) {
 	var (
 		seeker ConcurrentDataFileSetSeeker
@@ -1045,35 +1045,35 @@ func (p *reqPool) Put(req *retrieveRequest) {
 	p.pool.Put(req)
 }
 
-type reuseableRetrieverResources struct {
+type reusableRetrieverResources struct {
 	dataReqs      []*retrieveRequest
 	wideEntryReqs []*retrieveRequest
 }
 
-func newReuseableRetrieverResources() *reuseableRetrieverResources {
-	return &reuseableRetrieverResources{}
+func newReusableRetrieverResources() *reusableRetrieverResources {
+	return &reusableRetrieverResources{}
 }
 
-func (r *reuseableRetrieverResources) resetAll() {
+func (r *reusableRetrieverResources) resetAll() {
 	r.resetDataReqs()
 	r.resetWideEntryReqs()
 }
 
-func (r *reuseableRetrieverResources) resetDataReqs() {
+func (r *reusableRetrieverResources) resetDataReqs() {
 	for i := range r.dataReqs {
 		r.dataReqs[i] = nil
 	}
 	r.dataReqs = r.dataReqs[:0]
 }
 
-func (r *reuseableRetrieverResources) resetWideEntryReqs() {
+func (r *reusableRetrieverResources) resetWideEntryReqs() {
 	for i := range r.wideEntryReqs {
 		r.wideEntryReqs[i] = nil
 	}
 	r.wideEntryReqs = r.wideEntryReqs[:0]
 }
 
-func (r *reuseableRetrieverResources) appendWideEntryReq(
+func (r *reusableRetrieverResources) appendWideEntryReq(
 	req *retrieveRequest,
 ) {
 	r.wideEntryReqs = append(r.wideEntryReqs, req)
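
The resetDataReqs/resetWideEntryReqs bodies above use a common Go pooling idiom: nil out each element so the garbage collector can reclaim the pointed-to requests, then truncate to length zero while keeping the backing array for the next batch. A self-contained sketch of the same idiom (the request type here is illustrative):

```go
package main

import "fmt"

type request struct{ id int }

// reset clears a scratch slice for reuse: nil the elements so the GC can
// collect the requests, then keep capacity by truncating to length zero.
func reset(reqs []*request) []*request {
	for i := range reqs {
		reqs[i] = nil
	}
	return reqs[:0]
}

func main() {
	reqs := []*request{{1}, {2}, {3}}
	reqs = reset(reqs)
	fmt.Println(len(reqs), cap(reqs)) // 0 3: empty, but capacity retained
}
```
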
src/dbnode/persist/fs/write.go (5 additions, 5 deletions)
@@ -459,10 +459,10 @@ func (w *writer) writeIndexFileContents(
 	sort.Sort(w.indexEntries)
 
 	var (
-		offset        int64
-		prevID        []byte
-		tagsReuseable = w.tagsIterator
-		tagsEncoder   = w.tagEncoderPool.Get()
+		offset       int64
+		prevID       []byte
+		tagsReusable = w.tagsIterator
+		tagsEncoder  = w.tagEncoderPool.Get()
 	)
 	defer tagsEncoder.Finalize()
 	for i, entry := range w.indexEntries {
@@ -474,7 +474,7 @@ func (w *writer) writeIndexFileContents(
 			return fmt.Errorf("encountered duplicate ID: %s", id)
 		}
 
-		tagsIter, err := metadata.ResetOrReturnProvidedTagIterator(tagsReuseable)
+		tagsIter, err := metadata.ResetOrReturnProvidedTagIterator(tagsReusable)
 		if err != nil {
 			return err
 		}

src/dbnode/persist/types.go (9 additions, 9 deletions)
@@ -35,7 +35,7 @@ import (
 	"github.com/pborman/uuid"
 )
 
-var errReuseableTagIteratorRequired = errors.New("reuseable tags iterator is required")
+var errReusableTagIteratorRequired = errors.New("reusable tags iterator is required")
 
 // Metadata is metadata for a time series, it can
 // have several underlying sources.
@@ -101,30 +101,30 @@ func (m Metadata) BytesID() []byte {
 
 // ResetOrReturnProvidedTagIterator returns a tag iterator
 // for the series, returning a direct ref to a provided tag
-// iterator or using the reuseable tag iterator provided by the
+// iterator or using the reusable tag iterator provided by the
 // callsite if it needs to iterate over tags or fields.
 func (m Metadata) ResetOrReturnProvidedTagIterator(
-	reuseableTagsIterator ident.TagsIterator,
+	reusableTagsIterator ident.TagsIterator,
 ) (ident.TagIterator, error) {
-	if reuseableTagsIterator == nil {
+	if reusableTagsIterator == nil {
 		// Always check to make sure callsites won't
 		// get a bad allocation pattern of having
 		// to create one here inline if the metadata
 		// they are passing in suddenly changes from
 		// tagsIter to tags or fields with metadata.
-		return nil, errReuseableTagIteratorRequired
+		return nil, errReusableTagIteratorRequired
 	}
 	if m.tagsIter != nil {
 		return m.tagsIter, nil
 	}
 
 	if len(m.tags.Values()) > 0 {
-		reuseableTagsIterator.Reset(m.tags)
-		return reuseableTagsIterator, reuseableTagsIterator.Err()
+		reusableTagsIterator.Reset(m.tags)
+		return reusableTagsIterator, reusableTagsIterator.Err()
	}
 
-	reuseableTagsIterator.ResetFields(m.metadata.Fields)
-	return reuseableTagsIterator, reuseableTagsIterator.Err()
+	reusableTagsIterator.ResetFields(m.metadata.Fields)
+	return reusableTagsIterator, reusableTagsIterator.Err()
 }
 
 // Finalize will finalize any resources that requested
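
The write.go hunk above shows the call shape this method expects: the caller owns one reusable tags iterator for the whole flush and passes it in on every series. A hedged callsite sketch, assuming only the ResetOrReturnProvidedTagIterator signature shown here plus ident.NewTagsIterator (encodeAll and encodeTag are illustrative):

```go
package example

import (
	"github.com/m3db/m3/src/dbnode/persist"
	"github.com/m3db/m3/src/x/ident"
)

// encodeAll reuses a single tags iterator across every series metadata,
// mirroring the writeIndexFileContents callsite in the write.go diff.
func encodeAll(metadatas []persist.Metadata, encodeTag func(ident.Tag)) error {
	reusable := ident.NewTagsIterator(ident.Tags{})
	for _, md := range metadatas {
		tagsIter, err := md.ResetOrReturnProvidedTagIterator(reusable)
		if err != nil {
			return err
		}
		for tagsIter.Next() {
			encodeTag(tagsIter.Current())
		}
		if err := tagsIter.Err(); err != nil {
			return err
		}
	}
	return nil
}
```
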
src/dbnode/storage/cluster/database.go (4 additions, 4 deletions)
@@ -357,9 +357,9 @@ func (d *clusterDB) analyzeAndReportShardStates() {
 
 	defer reportStats()
 
-	// Manage the reuseable vars
-	d.resetReuseable()
-	defer d.resetReuseable()
+	// Manage the reusable vars
+	d.resetReusable()
+	defer d.resetReusable()
 
 	for _, s := range entry.ShardSet().All() {
 		if s.State() == shard.Initializing {
@@ -441,7 +441,7 @@ func (d *clusterDB) analyzeAndReportShardStates() {
 			zap.Uint32s("shards", markAvailable))
 	}
 
-func (d *clusterDB) resetReuseable() {
+func (d *clusterDB) resetReusable() {
 	d.resetInitializing()
 	d.resetBootstrapCount()
 }

src/dbnode/storage/fs_merge_with_mem.go (5 additions, 5 deletions)
@@ -42,7 +42,7 @@ type fsMergeWithMem struct {
 	retriever          series.QueryableBlockRetriever
 	dirtySeries        *dirtySeriesMap
 	dirtySeriesToWrite map[xtime.UnixNano]*idList
-	reuseableID        *ident.ReuseableBytesID
+	reusableID         *ident.ReusableBytesID
 }
 
 func newFSMergeWithMem(
@@ -56,7 +56,7 @@ func newFSMergeWithMem(
 		retriever:          retriever,
 		dirtySeries:        dirtySeries,
 		dirtySeriesToWrite: dirtySeriesToWrite,
-		reuseableID:        ident.NewReuseableBytesID(),
+		reusableID:         ident.NewReusableBytesID(),
 	}
 }
 
@@ -123,13 +123,13 @@ func (m *fsMergeWithMem) ForEachRemaining(
 	fn fs.ForEachRemainingFn,
 	nsCtx namespace.Context,
 ) error {
-	reuseableID := m.reuseableID
+	reusableID := m.reusableID
 	seriesList := m.dirtySeriesToWrite[blockStart]
 
 	for seriesElement := seriesList.Front(); seriesElement != nil; seriesElement = seriesElement.Next() {
 		seriesMetadata := seriesElement.Value
-		reuseableID.Reset(seriesMetadata.ID)
-		mergeWithData, hasData, err := m.fetchBlocks(ctx, reuseableID, blockStart, nsCtx)
+		reusableID.Reset(seriesMetadata.ID)
+		mergeWithData, hasData, err := m.fetchBlocks(ctx, reusableID, blockStart, nsCtx)
 		if err != nil {
 			return err
 		}
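
A caveat that loops like ForEachRemaining above rely on: a reset-style ID is a view over whatever bytes it was last Reset to, so any consumer that must retain the ID past the current iteration has to copy the bytes, because the ID is repointed on the next Reset and the source bytes may be pooled by the caller. A small sketch of that rule (collectMatching and its arguments are illustrative):

```go
package example

import "github.com/m3db/m3/src/x/ident"

// collectMatching reuses one ID across all raw series IDs, copying the
// bytes of matches because the ID is repointed by the following Reset.
func collectMatching(ids [][]byte, match func(ident.ID) bool) [][]byte {
	var out [][]byte
	id := ident.NewReusableBytesID()
	for _, raw := range ids {
		id.Reset(raw)
		if match(id) {
			out = append(out, append([]byte(nil), id.Bytes()...))
		}
	}
	return out
}
```
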
src/dbnode/storage/namespace.go (8 additions, 12 deletions)
@@ -1187,7 +1187,7 @@ type idAndBlockStart struct {
 	blockStart xtime.UnixNano
 }
 
-type coldFlushReuseableResources struct {
+type coldFlushReusableResources struct {
 	// dirtySeries is a map from a composite key of <series ID, block start>
 	// to an element in a list in the dirtySeriesToWrite map. This map is used
 	// to quickly test whether a series is dirty for a particular block start.
@@ -1209,22 +1209,22 @@ type coldFlushReuseableResources struct {
 	fsReader fs.DataFileSetReader
 }
 
-func newColdFlushReuseableResources(opts Options) (coldFlushReuseableResources, error) {
+func newColdFlushReusableResources(opts Options) coldFlushReusableResources {
 	fsReader, err := fs.NewReader(opts.BytesPool(), opts.CommitLogOptions().FilesystemOptions())
 	if err != nil {
-		return coldFlushReuseableResources{}, nil
+		return coldFlushReusableResources{}
 	}
 
-	return coldFlushReuseableResources{
+	return coldFlushReusableResources{
 		dirtySeries:        newDirtySeriesMap(),
 		dirtySeriesToWrite: make(map[xtime.UnixNano]*idList),
 		// TODO(juchan): set pool options.
 		idElementPool: newIDElementPool(nil),
 		fsReader:      fsReader,
-	}, nil
+	}
 }
 
-func (r *coldFlushReuseableResources) reset() {
+func (r *coldFlushReusableResources) reset() {
 	for _, seriesList := range r.dirtySeriesToWrite {
 		if seriesList != nil {
 			seriesList.Reset()
@@ -1259,12 +1259,7 @@ func (n *dbNamespace) ColdFlush(flushPersist persist.FlushPreparer) error {
 	}
 
 	shards := n.OwnedShards()
-
-	resources, err := newColdFlushReuseableResources(n.opts)
-	if err != nil {
-		n.metrics.flushColdData.ReportError(n.nowFn().Sub(callStart))
-		return err
-	}
+	resources := newColdFlushReusableResources(n.opts)
 
 	// NB(bodu): The in-mem index will lag behind the TSDB in terms of new series writes. For a period of
 	// time between when we rotate out the active cold mutable index segments (happens here) and when
@@ -1273,6 +1268,7 @@ func (n *dbNamespace) ColdFlush(flushPersist persist.FlushPreparer) error {
 	// where they will be evicted from the in-mem index.
 	var (
 		onColdFlushDone OnColdFlushDone
+		err             error
 	)
 	if n.reverseIndex != nil {
 		onColdFlushDone, err = n.reverseIndex.ColdFlush(shards)

src/dbnode/storage/shard.go (1 addition, 1 deletion)
@@ -2287,7 +2287,7 @@ func (s *dbShard) WarmFlush(
 
 func (s *dbShard) ColdFlush(
 	flushPreparer persist.FlushPreparer,
-	resources coldFlushReuseableResources,
+	resources coldFlushReusableResources,
 	nsCtx namespace.Context,
 	onFlushSeries persist.OnFlushSeries,
 ) (ShardColdFlush, error) {

src/dbnode/storage/shard_test.go (2 additions, 2 deletions)
@@ -638,7 +638,7 @@ func TestShardColdFlush(t *testing.T) {
 
 	preparer := persist.NewMockFlushPreparer(ctrl)
 	fsReader := fs.NewMockDataFileSetReader(ctrl)
-	resources := coldFlushReuseableResources{
+	resources := coldFlushReusableResources{
 		dirtySeries:        newDirtySeriesMap(),
 		dirtySeriesToWrite: make(map[xtime.UnixNano]*idList),
 		idElementPool:      newIDElementPool(nil),
@@ -711,7 +711,7 @@ func TestShardColdFlushNoMergeIfNothingDirty(t *testing.T) {
 	dirtySeriesToWrite[xtime.ToUnixNano(t2)] = newIDList(idElementPool)
 	dirtySeriesToWrite[xtime.ToUnixNano(t3)] = newIDList(idElementPool)
 
-	resources := coldFlushReuseableResources{
+	resources := coldFlushReusableResources{
 		dirtySeries:        newDirtySeriesMap(),
 		dirtySeriesToWrite: dirtySeriesToWrite,
 		idElementPool:      idElementPool,

src/dbnode/storage/storage_mock.go (1 addition, 1 deletion)

Some generated files are not rendered by default.
