From a42a09c560fd790c4dcc14fd6ce0a39f4c539f9c Mon Sep 17 00:00:00 2001 From: Prateek Rungta Date: Mon, 8 Apr 2019 13:28:10 -0400 Subject: [PATCH] [dbnode] Aggregate() using only FSTs where possible --- src/dbnode/generated-source-files.mk | 17 +- src/dbnode/storage/index.go | 134 ++- src/dbnode/storage/index/aggregate_results.go | 43 + .../aggregate_results_entry_arraypool_gen.go | 127 +++ src/dbnode/storage/index/block.go | 261 ++++- src/dbnode/storage/index/block_prop_test.go | 7 +- src/dbnode/storage/index/block_test.go | 264 +++++ .../index/field_terms_iterator_prop_test.go | 209 ++++ .../index/field_terms_iterator_test.go | 360 +++++++ .../storage/index/fields_terms_iterator.go | 187 ++++ src/dbnode/storage/index/index_mock.go | 166 +++ src/dbnode/storage/index/options.go | 52 +- src/dbnode/storage/index/types.go | 56 +- src/dbnode/storage/index_block_test.go | 110 +- src/m3ninx/generated-source-files.mk | 9 +- src/m3ninx/generated/mocks/generate.go | 2 +- .../index/segment/fst/fst_terms_iterator.go | 3 +- .../fst/fst_terms_postings_iterator.go | 5 +- src/m3ninx/index/segment/segment_mock.go | 985 ++++++++++++++---- src/m3ninx/search/proptest/query_gen.go | 2 - 20 files changed, 2694 insertions(+), 305 deletions(-) create mode 100644 src/dbnode/storage/index/aggregate_results_entry_arraypool_gen.go create mode 100644 src/dbnode/storage/index/field_terms_iterator_prop_test.go create mode 100644 src/dbnode/storage/index/field_terms_iterator_test.go create mode 100644 src/dbnode/storage/index/fields_terms_iterator.go diff --git a/src/dbnode/generated-source-files.mk b/src/dbnode/generated-source-files.mk index af9e4778c4..be579cbc04 100644 --- a/src/dbnode/generated-source-files.mk +++ b/src/dbnode/generated-source-files.mk @@ -172,7 +172,9 @@ genny-map-storage-index-aggregation-results: genny-map-storage-index-aggregate-v # generation rule for all generated arraypools .PHONY: genny-arraypool-all -genny-arraypool-all: genny-arraypool-node-segments 
+genny-arraypool-all: \ + genny-arraypool-node-segments \ + genny-arraypool-aggregate-results-entry \ # arraypool generation rule for ./network/server/tchannelthrift/node/segmentsArrayPool .PHONY: genny-arraypool-node-segments @@ -186,6 +188,19 @@ genny-arraypool-node-segments: rename_type_middle=Segments \ rename_constructor=newSegmentsArrayPool +# arraypool generation rule for ./storage/index/AggregateResultsEntryArrayPool +.PHONY: genny-arraypool-aggregate-results-entry +genny-arraypool-aggregate-results-entry: + cd $(m3x_package_path) && make genny-arraypool \ + pkg=index \ + elem_type=AggregateResultsEntry \ + target_package=$(m3db_package)/src/dbnode/storage/index \ + out_file=aggregate_results_entry_arraypool_gen.go \ + rename_type_prefix=AggregateResultsEntry \ + rename_type_middle=AggregateResultsEntry \ + rename_constructor=NewAggregateResultsEntryArrayPool \ + rename_gen_types=true \ + # generation rule for all generated leakcheckpools .PHONY: genny-leakcheckpool-all genny-leakcheckpool-all: \ diff --git a/src/dbnode/storage/index.go b/src/dbnode/storage/index.go index 21a7359b9e..528d2beca6 100644 --- a/src/dbnode/storage/index.go +++ b/src/dbnode/storage/index.go @@ -41,6 +41,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/index/convert" "github.com/m3db/m3/src/dbnode/storage/namespace" "github.com/m3db/m3/src/m3ninx/doc" + "github.com/m3db/m3/src/m3ninx/idx" m3ninxindex "github.com/m3db/m3/src/m3ninx/index" "github.com/m3db/m3/src/m3ninx/index/segment" "github.com/m3db/m3/src/m3ninx/index/segment/builder" @@ -72,6 +73,10 @@ const ( nsIndexReportStatsInterval = 10 * time.Second ) +var ( + allQuery = idx.NewAllQuery() +) + // nolint: maligned type nsIndex struct { state nsIndexState @@ -167,6 +172,23 @@ type newNamespaceIndexOpts struct { newBlockFn newBlockFn } +// execBlockQueryFn executes a query against the given block whilst tracking state. 
+type execBlockQueryFn func( + cancellable *resource.CancellableLifetime, + block index.Block, + query index.Query, + opts index.QueryOptions, + state *asyncQueryExecState, + results index.BaseResults, +) + +// asyncQueryExecState tracks the async execution errors and results for a query. +type asyncQueryExecState struct { + sync.Mutex + multiErr xerrors.MultiError + exhaustive bool +} + // newNamespaceIndex returns a new namespaceIndex for the provided namespace. func newNamespaceIndex( nsMD namespace.Metadata, @@ -265,6 +287,7 @@ func newNamespaceIndexWithOptions( queryWorkersPool: newIndexOpts.opts.QueryIDsWorkerPool(), metrics: newNamespaceIndexMetrics(indexOpts, instrumentOpts), } + if runtimeOptsMgr != nil { idx.runtimeOptsListener = runtimeOptsMgr.RegisterListener(idx) } @@ -869,7 +892,7 @@ func (i *nsIndex) Query( SizeLimit: opts.Limit, }) ctx.RegisterFinalizer(results) - exhaustive, err := i.query(ctx, query, results, opts) + exhaustive, err := i.query(ctx, query, results, opts, i.execBlockQueryFn) if err != nil { return index.QueryResult{}, err } @@ -892,7 +915,12 @@ func (i *nsIndex) AggregateQuery( Type: opts.Type, }) ctx.RegisterFinalizer(results) - exhaustive, err := i.query(ctx, query, results, opts.QueryOptions) + // use appropriate fn to query underlying blocks. + fn := i.execBlockQueryFn + if query.Equal(allQuery) { + fn = i.execBlockAggregateQueryFn + } + exhaustive, err := i.query(ctx, query, results, opts.QueryOptions, fn) if err != nil { return index.AggregateQueryResult{}, err } @@ -907,6 +935,7 @@ func (i *nsIndex) query( query index.Query, results index.BaseResults, opts index.QueryOptions, + execBlockFn execBlockQueryFn, ) (bool, error) { // Capture start before needing to acquire lock. start := i.nowFn() @@ -942,17 +971,12 @@ func (i *nsIndex) query( } var ( - deadline = start.Add(timeout) - wg sync.WaitGroup - // State contains concurrent mutable state for async execution below. 
- state = struct { - sync.Mutex - multiErr xerrors.MultiError - exhaustive bool - }{ + state = asyncQueryExecState{ exhaustive: true, } + deadline = start.Add(timeout) + wg sync.WaitGroup ) // Create a cancellable lifetime and cancel it at end of this method so that @@ -960,32 +984,6 @@ func (i *nsIndex) query( cancellable := resource.NewCancellableLifetime() defer cancellable.Cancel() - execBlockQuery := func(block index.Block) { - blockExhaustive, err := block.Query(cancellable, query, opts, results) - if err == index.ErrUnableToQueryBlockClosed { - // NB(r): Because we query this block outside of the results lock, it's - // possible this block may get closed if it slides out of retention, in - // that case those results are no longer considered valid and outside of - // retention regardless, so this is a non-issue. - err = nil - } - - state.Lock() - defer state.Unlock() - - if err != nil { - state.multiErr = state.multiErr.Add(err) - return - } - - if blockExhaustive { - return - } - - // If block had more data but we stopped early, need to notify caller. - state.exhaustive = false - } - for _, block := range blocks { // Capture block for async query execution below. block := block @@ -1011,7 +1009,7 @@ func (i *nsIndex) query( // No timeout, just wait blockingly for a worker. 
wg.Add(1) i.queryWorkersPool.Go(func() { - execBlockQuery(block) + execBlockFn(cancellable, block, query, opts, &state, results) wg.Done() }) continue @@ -1022,7 +1020,7 @@ func (i *nsIndex) query( if timeLeft := deadline.Sub(i.nowFn()); timeLeft > 0 { wg.Add(1) timedOut := !i.queryWorkersPool.GoWithTimeout(func() { - execBlockQuery(block) + execBlockFn(cancellable, block, query, opts, &state, results) wg.Done() }, timeLeft) @@ -1087,6 +1085,66 @@ func (i *nsIndex) query( return exhaustive, nil } +func (i *nsIndex) execBlockQueryFn( + cancellable *resource.CancellableLifetime, + block index.Block, + query index.Query, + opts index.QueryOptions, + state *asyncQueryExecState, + results index.BaseResults, +) { + blockExhaustive, err := block.Query(cancellable, query, opts, results) + if err == index.ErrUnableToQueryBlockClosed { + // NB(r): Because we query this block outside of the results lock, it's + // possible this block may get closed if it slides out of retention, in + // that case those results are no longer considered valid and outside of + // retention regardless, so this is a non-issue. 
+ err = nil + } + + state.Lock() + defer state.Unlock() + + if err != nil { + state.multiErr = state.multiErr.Add(err) + } + state.exhaustive = state.exhaustive && blockExhaustive +} + +func (i *nsIndex) execBlockAggregateQueryFn( + cancellable *resource.CancellableLifetime, + block index.Block, + query index.Query, + opts index.QueryOptions, + state *asyncQueryExecState, + results index.BaseResults, +) { + aggResults, ok := results.(index.AggregateResults) + if !ok { // should never happen + state.Lock() + state.multiErr = state.multiErr.Add( + fmt.Errorf("unknown results type [%T] received during aggregation", results)) + state.Unlock() + return + } + + blockExhaustive, err := block.Aggregate(cancellable, opts, aggResults) + if err == index.ErrUnableToQueryBlockClosed { + // NB(r): Because we query this block outside of the results lock, it's + // possible this block may get closed if it slides out of retention, in + // that case those results are no longer considered valid and outside of + // retention regardless, so this is a non-issue. 
+ err = nil + } + + state.Lock() + defer state.Unlock() + if err != nil { + state.multiErr = state.multiErr.Add(err) + } + state.exhaustive = state.exhaustive && blockExhaustive +} + func (i *nsIndex) timeoutForQueryWithRLock( ctx context.Context, ) time.Duration { diff --git a/src/dbnode/storage/index/aggregate_results.go b/src/dbnode/storage/index/aggregate_results.go index a35661f470..9ccc7b383b 100644 --- a/src/dbnode/storage/index/aggregate_results.go +++ b/src/dbnode/storage/index/aggregate_results.go @@ -104,6 +104,49 @@ func (r *aggregatedResults) AddDocuments(batch []doc.Document) (int, error) { return size, err } +func (r *aggregatedResults) AggregateResultsOptions() AggregateResultsOptions { + return r.aggregateOpts +} + +func (r *aggregatedResults) AddFields(batch []AggregateResultsEntry) int { + r.Lock() + for _, entry := range batch { + f := entry.Field + aggValues, ok := r.resultsMap.Get(f) + if !ok { + aggValues = r.valuesPool.Get() + // we can avoid the copy because we assume ownership of the passed ident.ID, + // but still need to finalize it. + r.resultsMap.SetUnsafe(f, aggValues, AggregateResultsMapSetUnsafeOptions{ + NoCopyKey: true, + NoFinalizeKey: false, + }) + } else { + // because we already have a entry for this field, we release the ident back to + // the underlying pool. + f.Finalize() + } + valuesMap := aggValues.Map() + for _, t := range entry.Terms { + if !valuesMap.Contains(t) { + // we can avoid the copy because we assume ownership of the passed ident.ID, + // but still need to finalize it. + valuesMap.SetUnsafe(t, struct{}{}, AggregateValuesMapSetUnsafeOptions{ + NoCopyKey: true, + NoFinalizeKey: false, + }) + } else { + // because we already have a entry for this term, we release the ident back to + // the underlying pool. 
+ t.Finalize() + } + } + } + size := r.resultsMap.Len() + r.Unlock() + return size +} + func (r *aggregatedResults) addDocumentsBatchWithLock( batch []doc.Document, ) error { diff --git a/src/dbnode/storage/index/aggregate_results_entry_arraypool_gen.go b/src/dbnode/storage/index/aggregate_results_entry_arraypool_gen.go new file mode 100644 index 0000000000..66ac84180c --- /dev/null +++ b/src/dbnode/storage/index/aggregate_results_entry_arraypool_gen.go @@ -0,0 +1,127 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// This file was automatically generated by genny. +// Any changes will be lost if this file is regenerated. +// see https://github.com/mauricelam/genny + +package index + +import ( + "github.com/m3db/m3/src/x/pool" +) + +// Copyright (c) 2018 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// AggregateResultsEntryArrayPool provides a pool for aggregateResultsEntry slices. +type AggregateResultsEntryArrayPool interface { + // Init initializes the array pool, it needs to be called + // before Get/Put use. + Init() + + // Get returns the a slice from the pool. + Get() []AggregateResultsEntry + + // Put returns the provided slice to the pool. 
+ Put(elems []AggregateResultsEntry) +} + +type AggregateResultsEntryFinalizeFn func([]AggregateResultsEntry) []AggregateResultsEntry + +type AggregateResultsEntryArrayPoolOpts struct { + Options pool.ObjectPoolOptions + Capacity int + MaxCapacity int + FinalizeFn AggregateResultsEntryFinalizeFn +} + +type AggregateResultsEntryArrPool struct { + opts AggregateResultsEntryArrayPoolOpts + pool pool.ObjectPool +} + +func NewAggregateResultsEntryArrayPool(opts AggregateResultsEntryArrayPoolOpts) AggregateResultsEntryArrayPool { + if opts.FinalizeFn == nil { + opts.FinalizeFn = defaultAggregateResultsEntryFinalizerFn + } + p := pool.NewObjectPool(opts.Options) + return &AggregateResultsEntryArrPool{opts, p} +} + +func (p *AggregateResultsEntryArrPool) Init() { + p.pool.Init(func() interface{} { + return make([]AggregateResultsEntry, 0, p.opts.Capacity) + }) +} + +func (p *AggregateResultsEntryArrPool) Get() []AggregateResultsEntry { + return p.pool.Get().([]AggregateResultsEntry) +} + +func (p *AggregateResultsEntryArrPool) Put(arr []AggregateResultsEntry) { + arr = p.opts.FinalizeFn(arr) + if max := p.opts.MaxCapacity; max > 0 && cap(arr) > max { + return + } + p.pool.Put(arr) +} + +func defaultAggregateResultsEntryFinalizerFn(elems []AggregateResultsEntry) []AggregateResultsEntry { + var empty AggregateResultsEntry + for i := range elems { + elems[i] = empty + } + elems = elems[:0] + return elems +} + +type AggregateResultsEntryArr []AggregateResultsEntry + +func (elems AggregateResultsEntryArr) grow(n int) []AggregateResultsEntry { + if cap(elems) < n { + elems = make([]AggregateResultsEntry, n) + } + elems = elems[:n] + // following compiler optimized memcpy impl + // https://github.com/golang/go/wiki/CompilerOptimizations#optimized-memclr + var empty AggregateResultsEntry + for i := range elems { + elems[i] = empty + } + return elems +} diff --git a/src/dbnode/storage/index/block.go b/src/dbnode/storage/index/block.go index 055fe73fd1..aa04cd9df1 100644 --- 
a/src/dbnode/storage/index/block.go +++ b/src/dbnode/storage/index/block.go @@ -21,6 +21,7 @@ package index import ( + "bytes" "errors" "fmt" "sync" @@ -39,6 +40,7 @@ import ( "github.com/m3db/m3/src/m3ninx/search/executor" "github.com/m3db/m3/src/x/context" xerrors "github.com/m3db/m3/src/x/errors" + "github.com/m3db/m3/src/x/ident" "github.com/m3db/m3/src/x/instrument" "github.com/m3db/m3/src/x/resource" xtime "github.com/m3db/m3/src/x/time" @@ -75,7 +77,8 @@ const ( blockStateSealed blockStateClosed - defaultQueryDocsBatchSize = 256 + defaultQueryDocsBatchSize = 256 + defaultAggregateResultsEntryBatchSize = 256 compactDebugLogEvery = 1 // Emit debug log for every compaction ) @@ -105,14 +108,15 @@ type block struct { backgroundSegments []*readableSeg shardRangesSegments []blockShardRangesSegments - newExecutorFn newExecutorFn - blockStart time.Time - blockEnd time.Time - blockSize time.Duration - blockOpts BlockOptions - opts Options - iopts instrument.Options - nsMD namespace.Metadata + newFieldsAndTermsIteratorFn newFieldsAndTermsIteratorFn + newExecutorFn newExecutorFn + blockStart time.Time + blockEnd time.Time + blockSize time.Duration + blockOpts BlockOptions + opts Options + iopts instrument.Options + nsMD namespace.Metadata compact blockCompact @@ -175,13 +179,13 @@ func NewBlock( blockStart: blockStart, blockEnd: blockStart.Add(blockSize), blockSize: blockSize, - blockOpts: opts, opts: indexOpts, iopts: iopts, nsMD: md, metrics: newBlockMetrics(iopts.MetricsScope()), logger: iopts.Logger(), } + b.newFieldsAndTermsIteratorFn = newFieldsAndTermsIterator b.newExecutorFn = b.executorWithRLock return b, nil @@ -728,6 +732,31 @@ func (b *block) executorWithRLock() (search.Executor, error) { return executor.NewExecutor(readers), nil } +func (b *block) segmentsWithRLock() []segment.Segment { + numSegments := len(b.foregroundSegments) + len(b.backgroundSegments) + for _, group := range b.shardRangesSegments { + numSegments += len(group.segments) + } + + segments 
:= make([]segment.Segment, 0, numSegments) + // Add foreground & background segments. + for _, seg := range b.foregroundSegments { + segments = append(segments, seg.Segment()) + } + for _, seg := range b.backgroundSegments { + segments = append(segments, seg.Segment()) + } + + // Loop over the segments associated to shard time ranges. + for _, group := range b.shardRangesSegments { + for _, seg := range group.segments { + segments = append(segments, seg) + } + } + + return segments +} + // Query acquires a read lock on the block so that the segments // are guaranteed to not be freed/released while accumulating results. // This allows references to the mmap'd segment data to be accumulated @@ -823,30 +852,226 @@ func (b *block) addQueryResults( results BaseResults, batch []doc.Document, ) ([]doc.Document, int, error) { - // Checkout the lifetime of the query before adding results + // checkout the lifetime of the query before adding results. queryValid := cancellable.TryCheckout() if !queryValid { - // Query not valid any longer, do not add results and return early + // query not valid any longer, do not add results and return early. return batch, 0, errCancelledQuery } - // Try to add the docs to the resource + // try to add the docs to the resource. size, err := results.AddDocuments(batch) - // Immediately release the checkout on the lifetime of query + // immediately release the checkout on the lifetime of query. cancellable.ReleaseCheckout() - // Reset batch + // reset batch. var emptyDoc doc.Document for i := range batch { batch[i] = emptyDoc } batch = batch[:0] - // Return results + // return results. return batch, size, err } +// Aggregate acquires a read lock on the block so that the segments +// are guaranteed to not be freed/released while accumulating results. 
+// NB: Aggregate is an optimization of the general aggregate Query approach +// for the case when we can skip going to raw documents, and instead rely on +// pre-aggregated results via the FST underlying the index. +func (b *block) Aggregate( + cancellable *resource.CancellableLifetime, + opts QueryOptions, + results AggregateResults, +) (bool, error) { + b.RLock() + defer b.RUnlock() + + if b.state == blockStateClosed { + return false, ErrUnableToQueryBlockClosed + } + + aggOpts := results.AggregateResultsOptions() + iterateTerms := aggOpts.Type == AggregateTagNamesAndValues + iterateOpts := fieldsAndTermsIteratorOpts{ + iterateTerms: iterateTerms, + allowFn: func(field []byte) bool { + // skip any field names that we shouldn't allow. + if bytes.Equal(field, doc.IDReservedFieldName) { + return false + } + return aggOpts.TermFilter.Allow(field) + }, + } + + iter, err := b.newFieldsAndTermsIteratorFn(nil, iterateOpts) + if err != nil { + return false, err + } + + var ( + size = results.Size() + batch = b.opts.AggregateResultsEntryArrayPool().Get() + batchSize = cap(batch) + iterClosed = false // tracking whether we need to free the iterator at the end. + ) + if batchSize == 0 { + batchSize = defaultAggregateResultsEntryBatchSize + } + + // cleanup at the end + defer func() { + b.opts.AggregateResultsEntryArrayPool().Put(batch) + if !iterClosed { + iter.Close() + } + }() + + segs := b.segmentsWithRLock() + for _, s := range segs { + if opts.LimitExceeded(size) { + break + } + + err = iter.Reset(s, iterateOpts) + if err != nil { + return false, err + } + iterClosed = false // only once the iterator has been successfully Reset(). 
+ + for iter.Next() { + if opts.LimitExceeded(size) { + break + } + + field, term := iter.Current() + batch = b.appendFieldAndTermToBatch(batch, field, term, iterateTerms) + if len(batch) < batchSize { + continue + } + + batch, size, err = b.addAggregateResults(cancellable, results, batch) + if err != nil { + return false, err + } + } + + if err := iter.Err(); err != nil { + return false, err + } + + iterClosed = true + if err := iter.Close(); err != nil { + return false, err + } + } + + // Add last batch to results if remaining. + if len(batch) > 0 { + batch, size, err = b.addAggregateResults(cancellable, results, batch) + if err != nil { + return false, err + } + } + + exhaustive := !opts.LimitExceeded(size) + return exhaustive, nil +} + +func (b *block) appendFieldAndTermToBatch( + batch []AggregateResultsEntry, + field, term []byte, + includeTerms bool, +) []AggregateResultsEntry { + // NB(prateek): we make a copy of the (field, term) entries returned + // by the iterator during traversal, because the []byte are only valid per entry during + // the traversal (i.e. calling Next() invalidates the []byte). We choose to do this + // instead of checking if the entry is required (duplicates may exist in the results map + // already), as it reduces contention on the map itself. Further, the ownership of these + // idents is transferred to the results map, which either hangs on to them (if they are new), + // or finalizes them if they are duplicates. + var ( + entry AggregateResultsEntry + lastField []byte + lastFieldIsValid bool + reuseLastEntry bool + ) + // we are iterating multiple segments so we may receive duplicates (same field/term), but + // as we are iterating one segment at a time, and because the underlying index structures + // are FSTs, we rely on the fact that iterator traversal is in order to avoid creating duplicate + // entries for the same fields, by checking the last batch entry to see if the bytes are + // the same. 
+ // It's easier to consider an example, say we have a segment with fields/terms: + // (f1, t1), (f1, t2), ..., (fn, t1), ..., (fn, tn) + // as we iterate in order, we receive (f1, t1) and then (f1, t2) we can avoid the repeated f1 + // allocation if the previous entry has the same value. + // NB: this isn't strictly true because when we switch iterating between segments, + // the fields/terms switch in an order which doesn't have to be strictly lexicographic. In that + // instance however, the only downside is we would be allocating more. i.e. this is just an + // optimisation, it doesn't affect correctness. + if len(batch) > 0 { + lastFieldIsValid = true + lastField = batch[len(batch)-1].Field.Bytes() + } + if lastFieldIsValid && bytes.Equal(lastField, field) { + reuseLastEntry = true + entry = batch[len(batch)-1] // avoid alloc cause we already have the field + } else { + entry.Field = b.pooledID(field) // allocate id because this is the first time we've seen it + } + + if includeTerms { + // terms are always new (as far we know without checking the map for duplicates), so we allocate + entry.Terms = append(entry.Terms, b.pooledID(term)) + } + + if reuseLastEntry { + batch[len(batch)-1] = entry + } else { + batch = append(batch, entry) + } + return batch +} + +func (b *block) pooledID(id []byte) ident.ID { + data := b.opts.CheckedBytesPool().Get(len(id)) + data.IncRef() + data.AppendAll(id) + data.DecRef() + return b.opts.IdentifierPool().BinaryID(data) +} + +func (b *block) addAggregateResults( + cancellable *resource.CancellableLifetime, + results AggregateResults, + batch []AggregateResultsEntry, +) ([]AggregateResultsEntry, int, error) { + // checkout the lifetime of the query before adding results. + queryValid := cancellable.TryCheckout() + if !queryValid { + // query not valid any longer, do not add results and return early. + return batch, 0, errCancelledQuery + } + + // try to add the docs to the resource. 
+ size := results.AddFields(batch) + + // immediately release the checkout on the lifetime of query. + cancellable.ReleaseCheckout() + + // reset batch. + var emptyField AggregateResultsEntry + for i := range batch { + batch[i] = emptyField + } + batch = batch[:0] + + // return results. + return batch, size, nil +} + func (b *block) AddResults( results result.IndexBlock, ) error { @@ -879,7 +1104,7 @@ func (b *block) AddResults( for _, seg := range segments { readThroughSeg := seg if _, ok := seg.(segment.MutableSegment); !ok { - // Only wrap the immutable segments with a read through cache. + // only wrap the immutable segments with a read through cache. readThroughSeg = NewReadThroughSegment(seg, plCache, readThroughOpts) } readThroughSegments = append(readThroughSegments, readThroughSeg) @@ -890,7 +1115,7 @@ func (b *block) AddResults( segments: readThroughSegments, } - // First see if this block can cover all our current blocks covering shard + // first see if this block can cover all our current blocks covering shard // time ranges. 
currFulfilled := make(result.ShardTimeRanges) for _, existing := range b.shardRangesSegments { diff --git a/src/dbnode/storage/index/block_prop_test.go b/src/dbnode/storage/index/block_prop_test.go index 2887ebd39d..12e2b248aa 100644 --- a/src/dbnode/storage/index/block_prop_test.go +++ b/src/dbnode/storage/index/block_prop_test.go @@ -37,9 +37,8 @@ import ( "github.com/m3db/m3/src/m3ninx/index/segment/fst" "github.com/m3db/m3/src/m3ninx/search" "github.com/m3db/m3/src/m3ninx/search/proptest" - "github.com/m3db/m3/src/m3ninx/util" - "github.com/m3db/m3/src/x/resource" "github.com/m3db/m3/src/x/instrument" + "github.com/m3db/m3/src/x/resource" "github.com/leanovate/gopter" "github.com/leanovate/gopter/prop" @@ -47,9 +46,7 @@ import ( ) var ( - testFstOptions = fst.NewOptions() - testBlockSize = time.Hour - lotsTestDocuments = util.MustReadDocs("../../../m3ninx/util/testdata/node_exporter.json", 2000) + testBlockSize = time.Hour ) // TestPostingsListCacheDoesNotAffectBlockQueryResults verifies that the postings list diff --git a/src/dbnode/storage/index/block_test.go b/src/dbnode/storage/index/block_test.go index 289e02ea40..c2a0e12d9e 100644 --- a/src/dbnode/storage/index/block_test.go +++ b/src/dbnode/storage/index/block_test.go @@ -36,6 +36,7 @@ import ( "github.com/m3db/m3/src/m3ninx/index/segment/mem" "github.com/m3db/m3/src/m3ninx/search" "github.com/m3db/m3/src/x/ident" + "github.com/m3db/m3/src/x/pool" "github.com/m3db/m3/src/x/resource" xtime "github.com/m3db/m3/src/x/time" @@ -1506,6 +1507,269 @@ func TestBlockWriteBackgroundCompact(t *testing.T) { b.RUnlock() } +func TestBlockAggregateAfterClose(t *testing.T) { + testMD := newTestNSMetadata(t) + start := time.Now().Truncate(time.Hour) + b, err := NewBlock(start, testMD, BlockOptions{}, testOpts) + require.NoError(t, err) + + require.Equal(t, start, b.StartTime()) + require.Equal(t, start.Add(time.Hour), b.EndTime()) + require.NoError(t, b.Close()) + + _, err = b.Aggregate(resource.NewCancellableLifetime(), 
+ QueryOptions{}, nil) + require.Error(t, err) +} + +func TestBlockAggregateIterationErr(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + testMD := newTestNSMetadata(t) + start := time.Now().Truncate(time.Hour) + blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts) + require.NoError(t, err) + + b, ok := blk.(*block) + require.True(t, ok) + + seg1 := segment.NewMockMutableSegment(ctrl) + + b.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)} + iter := NewMockfieldsAndTermsIterator(ctrl) + b.newFieldsAndTermsIteratorFn = func( + s segment.Segment, opts fieldsAndTermsIteratorOpts) (fieldsAndTermsIterator, error) { + return iter, nil + } + + results := NewAggregateResults(ident.StringID("ns"), AggregateResultsOptions{ + SizeLimit: 3, + Type: AggregateTagNamesAndValues, + }, testOpts) + + gomock.InOrder( + iter.EXPECT().Reset(seg1, gomock.Any()).Return(nil), + iter.EXPECT().Next().Return(true), + iter.EXPECT().Current().Return([]byte("f1"), []byte("t1")), + iter.EXPECT().Next().Return(false), + iter.EXPECT().Err().Return(fmt.Errorf("unknown error")), + iter.EXPECT().Close().Return(nil), + ) + _, err = b.Aggregate(resource.NewCancellableLifetime(), QueryOptions{Limit: 3}, results) + require.Error(t, err) +} + +func TestBlockAggregate(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + testMD := newTestNSMetadata(t) + start := time.Now().Truncate(time.Hour) + blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts) + require.NoError(t, err) + + b, ok := blk.(*block) + require.True(t, ok) + + seg1 := segment.NewMockMutableSegment(ctrl) + + b.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)} + iter := NewMockfieldsAndTermsIterator(ctrl) + b.newFieldsAndTermsIteratorFn = func( + s segment.Segment, opts fieldsAndTermsIteratorOpts) (fieldsAndTermsIterator, error) { + return iter, nil + } + + results := NewAggregateResults(ident.StringID("ns"), AggregateResultsOptions{ + 
SizeLimit: 3, + Type: AggregateTagNamesAndValues, + }, testOpts) + + gomock.InOrder( + iter.EXPECT().Reset(seg1, gomock.Any()).Return(nil), + iter.EXPECT().Next().Return(true), + iter.EXPECT().Current().Return([]byte("f1"), []byte("t1")), + iter.EXPECT().Next().Return(true), + iter.EXPECT().Current().Return([]byte("f1"), []byte("t2")), + iter.EXPECT().Next().Return(true), + iter.EXPECT().Current().Return([]byte("f2"), []byte("t1")), + iter.EXPECT().Next().Return(true), + iter.EXPECT().Current().Return([]byte("f1"), []byte("t3")), + iter.EXPECT().Next().Return(false), + iter.EXPECT().Err().Return(nil), + iter.EXPECT().Close().Return(nil), + ) + exhaustive, err := b.Aggregate(resource.NewCancellableLifetime(), QueryOptions{Limit: 3}, results) + require.NoError(t, err) + require.True(t, exhaustive) + + assertAggregateResultsMapEquals(t, map[string][]string{ + "f1": []string{"t1", "t2", "t3"}, + "f2": []string{"t1"}, + }, results) +} + +func TestBlockAggregateNotExhaustive(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + testMD := newTestNSMetadata(t) + start := time.Now().Truncate(time.Hour) + + aggResultsEntryArrayPool := NewAggregateResultsEntryArrayPool(AggregateResultsEntryArrayPoolOpts{ + Options: pool.NewObjectPoolOptions(). 
+ SetSize(aggregateResultsEntryArrayPoolSize), + Capacity: 1, + MaxCapacity: 1, + }) + aggResultsEntryArrayPool.Init() + opts := testOpts.SetAggregateResultsEntryArrayPool(aggResultsEntryArrayPool) + + blk, err := NewBlock(start, testMD, BlockOptions{}, opts) + require.NoError(t, err) + + b, ok := blk.(*block) + require.True(t, ok) + + seg1 := segment.NewMockMutableSegment(ctrl) + + b.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)} + iter := NewMockfieldsAndTermsIterator(ctrl) + b.newFieldsAndTermsIteratorFn = func( + s segment.Segment, opts fieldsAndTermsIteratorOpts) (fieldsAndTermsIterator, error) { + return iter, nil + } + + results := NewAggregateResults(ident.StringID("ns"), AggregateResultsOptions{ + SizeLimit: 1, + Type: AggregateTagNamesAndValues, + }, testOpts) + + gomock.InOrder( + iter.EXPECT().Reset(seg1, gomock.Any()).Return(nil), + iter.EXPECT().Next().Return(true), + iter.EXPECT().Current().Return([]byte("f1"), []byte("t1")), + iter.EXPECT().Next().Return(true), + iter.EXPECT().Err().Return(nil), + iter.EXPECT().Close().Return(nil), + ) + exhaustive, err := b.Aggregate(resource.NewCancellableLifetime(), QueryOptions{Limit: 1}, results) + require.NoError(t, err) + require.False(t, exhaustive) + + assertAggregateResultsMapEquals(t, map[string][]string{ + "f1": []string{"t1"}, + }, results) +} + +func TestBlockE2EInsertAggregate(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + blockSize := time.Hour + + testMD := newTestNSMetadata(t) + now := time.Now() + blockStart := now.Truncate(blockSize) + + nowNotBlockStartAligned := now. + Truncate(blockSize). + Add(time.Minute) + + // Use a larger batch size to simulate large number in a batch + // coming back (to ensure code path for reusing buffers for iterator + // is covered). 
+ testOpts := optionsWithDocsArrayPool(testOpts, 16, 256) + + blk, err := NewBlock(blockStart, testMD, + BlockOptions{ + ForegroundCompactorMmapDocsData: true, + BackgroundCompactorMmapDocsData: true, + }, testOpts) + require.NoError(t, err) + b, ok := blk.(*block) + require.True(t, ok) + + h1 := NewMockOnIndexSeries(ctrl) + h1.EXPECT().OnIndexFinalize(xtime.ToUnixNano(blockStart)) + h1.EXPECT().OnIndexSuccess(xtime.ToUnixNano(blockStart)) + + h2 := NewMockOnIndexSeries(ctrl) + h2.EXPECT().OnIndexFinalize(xtime.ToUnixNano(blockStart)) + h2.EXPECT().OnIndexSuccess(xtime.ToUnixNano(blockStart)) + + h3 := NewMockOnIndexSeries(ctrl) + h3.EXPECT().OnIndexFinalize(xtime.ToUnixNano(blockStart)) + h3.EXPECT().OnIndexSuccess(xtime.ToUnixNano(blockStart)) + + batch := NewWriteBatch(WriteBatchOptions{ + IndexBlockSize: blockSize, + }) + batch.Append(WriteBatchEntry{ + Timestamp: nowNotBlockStartAligned, + OnIndexSeries: h1, + }, testDoc1()) + batch.Append(WriteBatchEntry{ + Timestamp: nowNotBlockStartAligned, + OnIndexSeries: h2, + }, testDoc2()) + batch.Append(WriteBatchEntry{ + Timestamp: nowNotBlockStartAligned, + OnIndexSeries: h3, + }, testDoc3()) + + res, err := b.WriteBatch(batch) + require.NoError(t, err) + require.Equal(t, int64(3), res.NumSuccess) + require.Equal(t, int64(0), res.NumError) + + results := NewAggregateResults(ident.StringID("ns"), AggregateResultsOptions{ + SizeLimit: 10, + Type: AggregateTagNamesAndValues, + }, testOpts) + + exhaustive, err := b.Aggregate(resource.NewCancellableLifetime(), QueryOptions{Limit: 10}, results) + require.NoError(t, err) + require.True(t, exhaustive) + + assertAggregateResultsMapEquals(t, map[string][]string{ + "bar": []string{"baz", "qux"}, + "some": []string{"more", "other"}, + }, results) +} + +func assertAggregateResultsMapEquals(t *testing.T, expected map[string][]string, observed AggregateResults) { + aggResultsMap := observed.Map() + // ensure `expected` contained in `observed` + for field, terms := range expected { 
+ entry, ok := aggResultsMap.Get(ident.StringID(field)) + require.True(t, ok, "field from expected map missing in observed", field) + valuesMap := entry.valuesMap + for _, term := range terms { + _, ok = valuesMap.Get(ident.StringID(term)) + require.True(t, ok, "term from expected map missing in observed", field, term) + } + } + // ensure `observed` contained in `expected` + for _, entry := range aggResultsMap.Iter() { + field := entry.Key() + valuesMap := entry.Value().valuesMap + for _, entry := range valuesMap.Iter() { + term := entry.Key() + slice, ok := expected[field.String()] + require.True(t, ok, "field from observed map missing in expected", field.String()) + found := false + for _, expTerm := range slice { + if expTerm == term.String() { + found = true + } + } + require.True(t, found, "term from observed map missing in expected", field.String(), term.String()) + } + } +} + func testSegment(t *testing.T, docs ...doc.Document) segment.Segment { seg, err := mem.NewSegment(0, testOpts.MemSegmentOptions()) require.NoError(t, err) diff --git a/src/dbnode/storage/index/field_terms_iterator_prop_test.go b/src/dbnode/storage/index/field_terms_iterator_prop_test.go new file mode 100644 index 0000000000..4bf17963c1 --- /dev/null +++ b/src/dbnode/storage/index/field_terms_iterator_prop_test.go @@ -0,0 +1,209 @@ +// +build big + +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package index + +import ( + "fmt" + "math/rand" + "os" + "sort" + "strings" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/gen" + "github.com/leanovate/gopter/prop" + "github.com/m3db/m3/src/m3ninx/index/segment" + xtest "github.com/m3db/m3/src/x/test" +) + +func TestFieldsTermsIteratorPropertyTest(t *testing.T) { + parameters := gopter.DefaultTestParameters() + seed := time.Now().UnixNano() + parameters.MinSuccessfulTests = 100 + parameters.MaxSize = 40 + parameters.Rng = rand.New(rand.NewSource(seed)) + properties := gopter.NewProperties(parameters) + + properties.Property("Fields Terms Iteration works", prop.ForAll( + func(i fieldsTermsIteratorPropInput) (bool, error) { + expected := i.expected() + seg := i.setup.asSegment(t) + iter, err := newFieldsAndTermsIterator(seg, fieldsAndTermsIteratorOpts{ + iterateTerms: i.iterateTerms, + allowFn: i.allowFn, + }) + if err != nil { + return false, err + } + observed := toSlice(t, iter) + requireSlicesEqual(t, expected, observed) + return true, nil + }, + genFieldsTermsIteratorPropInput(), + )) + + reporter := gopter.NewFormatedReporter(true, 160, os.Stdout) + if !properties.Run(reporter) { + t.Errorf("failed with initial seed: %d", seed) + } +} + +func TestFieldsTermsIteratorPropertyTestNoPanic(t *testing.T) { + ctrl := gomock.NewController(xtest.Reporter{t}) + defer ctrl.Finish() + + parameters := gopter.DefaultTestParameters() + seed := time.Now().UnixNano() + 
parameters.MinSuccessfulTests = 100 + parameters.MaxSize = 40 + parameters.Rng = rand.New(rand.NewSource(seed)) + properties := gopter.NewProperties(parameters) + + // the correctness prop test TestFieldsTermsIteratorPropertyTest, ensures we behave correctly + // on the happy path; this prop tests ensures we don't panic unless the underlying iterator + // itself panics. + properties.Property("Fields Terms Iteration doesn't blow up", prop.ForAll( + func(seg segment.Segment, iterate bool) (bool, error) { + iter, err := newFieldsAndTermsIterator(seg, fieldsAndTermsIteratorOpts{ + iterateTerms: iterate, + }) + if err != nil { + return false, err + } + toSlice(t, iter) + return true, nil + }, + genIterableSegment(ctrl), + gen.Bool(), + )) + + reporter := gopter.NewFormatedReporter(true, 160, os.Stdout) + if !properties.Run(reporter) { + t.Errorf("failed with initial seed: %d", seed) + } +} + +type fieldsTermsIteratorPropInput struct { + setup fieldsTermsIterSetup + iterateTerms bool + allowFn allowFn +} + +func (i fieldsTermsIteratorPropInput) expected() []pair { + fields := i.setup.fields + expected := make([]pair, 0, len(fields)) + seen := make(map[string]bool, len(fields)) + for _, f := range fields { + if !i.allowFn([]byte(f.Name)) { + continue + } + if seen[f.Name] { + continue + } + seen[f.Name] = true + if !i.iterateTerms { + f.Value = "" + } + expected = append(expected, f) + } + return expected +} + +func genIterableSegment(ctrl *gomock.Controller) gopter.Gen { + return gen.MapOf(genIterpoint(), gen.SliceOf(genIterpoint())). 
+ Map(func(tagValues map[iterpoint][]iterpoint) segment.Segment { + fields := make([]iterpoint, 0, len(tagValues)) + for f := range tagValues { + fields = append(fields, f) + } + sort.Slice(fields, func(i, j int) bool { + return strings.Compare(fields[i].value, fields[j].value) < 0 + }) + + s := segment.NewMockSegment(ctrl) + fieldIterable := segment.NewMockFieldsIterable(ctrl) + fieldIterator := &stubFieldIterator{points: fields} + termsIterable := segment.NewMockTermsIterable(ctrl) + + s.EXPECT().FieldsIterable().Return(fieldIterable).AnyTimes() + s.EXPECT().TermsIterable().Return(termsIterable).AnyTimes() + fieldIterable.EXPECT().Fields().Return(fieldIterator, nil).AnyTimes() + + for f, values := range tagValues { + sort.Slice(values, func(i, j int) bool { + return strings.Compare(values[i].value, values[j].value) < 0 + }) + termIterator := &stubTermIterator{points: values} + termsIterable.EXPECT().Terms([]byte(f.value)).Return(termIterator, nil).AnyTimes() + } + return s + }) +} + +func genIterpoint() gopter.Gen { + return gen.Identifier().Map(func(s string, params *gopter.GenParameters) iterpoint { + ip := iterpoint{value: s} + if params.NextBool() { + ip.err = fmt.Errorf(s) + } + return ip + }) +} + +func genFieldsTermsIteratorPropInput() gopter.Gen { + return genFieldsTermsIteratorSetup(). + Map(func(s fieldsTermsIterSetup, params *gopter.GenParameters) fieldsTermsIteratorPropInput { + allowedFields := make(map[string]bool, len(s.fields)) + for _, f := range s.fields { + if params.NextBool() { + allowedFields[f.Name] = true + } + } + return fieldsTermsIteratorPropInput{ + setup: s, + iterateTerms: params.NextBool(), + allowFn: func(f []byte) bool { + return allowedFields[string(f)] + }, + } + }) +} + +func genFieldsTermsIteratorSetup() gopter.Gen { + return gen.SliceOf( + gen.Identifier()). + SuchThat(func(items []string) bool { + return len(items)%2 == 0 && len(items) > 0 + }). 
+ Map(func(items []string) fieldsTermsIterSetup { + pairs := make([]pair, 0, len(items)/2) + for i := 0; i < len(items); i += 2 { + name, value := items[i], items[i+1] + pairs = append(pairs, pair{name, value}) + } + return newFieldsTermsIterSetup(pairs...) + }) +} diff --git a/src/dbnode/storage/index/field_terms_iterator_test.go b/src/dbnode/storage/index/field_terms_iterator_test.go new file mode 100644 index 0000000000..647a5168ed --- /dev/null +++ b/src/dbnode/storage/index/field_terms_iterator_test.go @@ -0,0 +1,360 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package index + +import ( + "bytes" + "fmt" + "sort" + "strings" + "testing" + + "github.com/m3db/m3/src/m3ninx/doc" + "github.com/m3db/m3/src/m3ninx/index/segment" + "github.com/m3db/m3/src/m3ninx/index/segment/fst" + "github.com/m3db/m3/src/m3ninx/postings" + "github.com/m3db/m3/src/m3ninx/util" + xtest "github.com/m3db/m3/src/x/test" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +var ( + testFstOptions = fst.NewOptions() + lotsTestDocuments = util.MustReadDocs("../../../m3ninx/util/testdata/node_exporter.json", 2000) +) + +func TestFieldsTermsIteratorSimple(t *testing.T) { + s := newFieldsTermsIterSetup( + pair{"a", "b"}, pair{"a", "c"}, + pair{"d", "e"}, pair{"d", "f"}, + pair{"g", "h"}, + pair{"i", "j"}, + pair{"k", "l"}, + ) + seg := s.asSegment(t) + + iter, err := newFieldsAndTermsIterator(seg, fieldsAndTermsIteratorOpts{iterateTerms: true}) + require.NoError(t, err) + s.requireEquals(t, iter) +} + +func TestFieldsTermsIteratorReuse(t *testing.T) { + pairs := []pair{ + pair{"a", "b"}, pair{"a", "c"}, + pair{"d", "e"}, pair{"d", "f"}, + pair{"g", "h"}, + pair{"i", "j"}, + pair{"k", "l"}, + } + + iter, err := newFieldsAndTermsIterator(nil, fieldsAndTermsIteratorOpts{}) + require.NoError(t, err) + + s := newFieldsTermsIterSetup(pairs...) 
+ seg := s.asSegment(t) + err = iter.Reset(seg, fieldsAndTermsIteratorOpts{iterateTerms: true}) + require.NoError(t, err) + s.requireEquals(t, iter) + + err = iter.Reset(seg, fieldsAndTermsIteratorOpts{ + iterateTerms: true, + allowFn: func(f []byte) bool { + return !bytes.Equal([]byte("a"), f) && !bytes.Equal([]byte("k"), f) + }, + }) + require.NoError(t, err) + slice := toSlice(t, iter) + requireSlicesEqual(t, []pair{ + pair{"d", "e"}, pair{"d", "f"}, + pair{"g", "h"}, + pair{"i", "j"}, + }, slice) + + err = iter.Reset(seg, fieldsAndTermsIteratorOpts{ + iterateTerms: true, + allowFn: func(f []byte) bool { + return bytes.Equal([]byte("k"), f) || bytes.Equal([]byte("a"), f) + }, + }) + require.NoError(t, err) + slice = toSlice(t, iter) + requireSlicesEqual(t, []pair{ + pair{"a", "b"}, pair{"a", "c"}, + pair{"k", "l"}, + }, slice) +} + +func TestFieldsTermsIteratorSimpleSkip(t *testing.T) { + input := []pair{ + pair{"a", "b"}, pair{"a", "c"}, + pair{"d", "e"}, pair{"d", "f"}, + pair{"g", "h"}, + pair{"i", "j"}, + pair{"k", "l"}, + } + s := newFieldsTermsIterSetup(input...) 
+ seg := s.asSegment(t) + + iter, err := newFieldsAndTermsIterator(seg, fieldsAndTermsIteratorOpts{ + iterateTerms: true, + allowFn: func(f []byte) bool { + return !bytes.Equal([]byte("a"), f) && !bytes.Equal([]byte("k"), f) + }, + }) + require.NoError(t, err) + slice := toSlice(t, iter) + requireSlicesEqual(t, []pair{ + pair{"d", "e"}, pair{"d", "f"}, + pair{"g", "h"}, + pair{"i", "j"}, + }, slice) +} + +func TestFieldsTermsIteratorTermsOnly(t *testing.T) { + s := newFieldsTermsIterSetup( + pair{"a", "b"}, pair{"a", "c"}, + pair{"d", "e"}, pair{"d", "f"}, + pair{"g", "h"}, + pair{"i", "j"}, + pair{"k", "l"}, + ) + seg := s.asSegment(t) + + iter, err := newFieldsAndTermsIterator(seg, fieldsAndTermsIteratorOpts{}) + require.NoError(t, err) + slice := toSlice(t, iter) + requireSlicesEqual(t, []pair{ + pair{"a", ""}, pair{"d", ""}, pair{"g", ""}, pair{"i", ""}, pair{"k", ""}, + }, slice) +} + +func TestFieldsTermsIteratorEmptyTerm(t *testing.T) { + ctrl := gomock.NewController(xtest.Reporter{t}) + defer ctrl.Finish() + + seg := newMockSegment(ctrl, map[string][]string{ + "a": nil, + }) + iter, err := newFieldsAndTermsIterator(seg, fieldsAndTermsIteratorOpts{iterateTerms: false}) + require.NoError(t, err) + slice := toSlice(t, iter) + requireSlicesEqual(t, []pair{pair{"a", ""}}, slice) +} + +func TestFieldsTermsIteratorEmptyTermInclude(t *testing.T) { + ctrl := gomock.NewController(xtest.Reporter{t}) + defer ctrl.Finish() + + seg := newMockSegment(ctrl, map[string][]string{ + "a": nil, + }) + iter, err := newFieldsAndTermsIterator(seg, fieldsAndTermsIteratorOpts{iterateTerms: true}) + require.NoError(t, err) + slice := toSlice(t, iter) + requireSlicesEqual(t, []pair{}, slice) +} + +func newMockSegment(ctrl *gomock.Controller, tagValues map[string][]string) segment.Segment { + fields := make([]iterpoint, 0, len(tagValues)) + for k := range tagValues { + fields = append(fields, iterpoint{ + value: k, + }) + } + sort.Slice(fields, func(i, j int) bool { + return 
strings.Compare(fields[i].value, fields[j].value) < 0 + }) + + s := segment.NewMockSegment(ctrl) + fieldIterable := segment.NewMockFieldsIterable(ctrl) + fieldIterator := &stubFieldIterator{points: fields} + termsIterable := segment.NewMockTermsIterable(ctrl) + + s.EXPECT().FieldsIterable().Return(fieldIterable).AnyTimes() + s.EXPECT().TermsIterable().Return(termsIterable).AnyTimes() + fieldIterable.EXPECT().Fields().Return(fieldIterator, nil).AnyTimes() + + for _, f := range fields { + termValues := tagValues[f.value] + sort.Strings(termValues) + terms := make([]iterpoint, 0, len(termValues)) + for _, t := range termValues { + terms = append(terms, iterpoint{ + value: t, + }) + } + termIterator := &stubTermIterator{points: terms} + termsIterable.EXPECT().Terms([]byte(f.value)).Return(termIterator, nil).AnyTimes() + } + + return s +} + +type stubTermIterator struct { + current iterpoint + points []iterpoint +} + +func (s *stubTermIterator) Next() bool { + if len(s.points) == 0 { + return false + } + s.current = s.points[0] + s.points = s.points[1:] + return true +} + +func (s *stubTermIterator) Current() ([]byte, postings.List) { + return []byte(s.current.value), nil +} + +func (s *stubTermIterator) Err() error { + return s.current.err +} + +func (s *stubTermIterator) Close() error { + if s.current.err != nil { + return s.current.err + } + for s.Next() { + if err := s.Err(); err != nil { + return err + } + } + return nil +} + +type stubFieldIterator struct { + current iterpoint + points []iterpoint +} + +func (s *stubFieldIterator) Next() bool { + if len(s.points) == 0 { + return false + } + s.current = s.points[0] + s.points = s.points[1:] + return true +} + +func (s *stubFieldIterator) Current() []byte { + return []byte(s.current.value) +} + +func (s *stubFieldIterator) Err() error { + return s.current.err +} + +func (s *stubFieldIterator) Close() error { + if s.current.err != nil { + return s.current.err + } + for s.Next() { + if err := s.Err(); err != nil { + 
return err + } + } + return nil +} + +type iterpoint struct { + err error + value string +} + +type pair struct { + Name, Value string +} + +func newFieldsTermsIterSetup(fields ...pair) fieldsTermsIterSetup { + sort.Slice(fields, func(i, j int) bool { + c := strings.Compare(fields[i].Name, fields[j].Name) + if c == 0 { + return strings.Compare(fields[i].Value, fields[j].Value) < 0 + } + return c < 0 + }) + return fieldsTermsIterSetup{fields} +} + +type fieldsTermsIterSetup struct { + fields []pair +} + +func (s *fieldsTermsIterSetup) asSegment(t *testing.T) segment.Segment { + docs := make([]doc.Document, 0, len(s.fields)) + for _, f := range s.fields { + docs = append(docs, doc.Document{ + ID: []byte(fmt.Sprintf("id_%v_%v", f.Name, f.Value)), + Fields: []doc.Field{ + doc.Field{ + Name: []byte(f.Name), + Value: []byte(f.Value), + }, + }, + }) + } + memSeg := testSegment(t, docs...).(segment.MutableSegment) + return fst.ToTestSegment(t, memSeg, testFstOptions) +} + +func (s *fieldsTermsIterSetup) requireEquals(t *testing.T, iter fieldsAndTermsIterator) { + pending := s.fields + for len(pending) > 0 { + require.True(t, iter.Next()) + name, value := iter.Current() + if bytes.Equal(name, doc.IDReservedFieldName) { + continue + } + top := pending[0] + pending = pending[1:] + require.Equal(t, top.Name, string(name)) + require.Equal(t, top.Value, string(value)) + } + require.False(t, iter.Next()) + require.NoError(t, iter.Err()) + require.NoError(t, iter.Close()) +} + +func toSlice(t *testing.T, iter fieldsAndTermsIterator) []pair { + var pairs []pair + for iter.Next() { + n, v := iter.Current() + if bytes.Equal(n, doc.IDReservedFieldName) { + continue + } + pairs = append(pairs, pair{ + Name: string(n), + Value: string(v), + }) + } + return pairs +} + +func requireSlicesEqual(t *testing.T, a, b []pair) { + require.Equal(t, len(a), len(b)) + for i := 0; i < len(a); i++ { + require.Equal(t, a[i], b[i]) + } +} diff --git a/src/dbnode/storage/index/fields_terms_iterator.go 
b/src/dbnode/storage/index/fields_terms_iterator.go new file mode 100644 index 0000000000..fea0c91388 --- /dev/null +++ b/src/dbnode/storage/index/fields_terms_iterator.go @@ -0,0 +1,187 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package index + +import ( + "github.com/m3db/m3/src/m3ninx/index/segment" + xerrors "github.com/m3db/m3/src/x/errors" +) + +// fieldsAndTermsIteratorOpts configures the fieldsAndTermsIterator. 
+type fieldsAndTermsIteratorOpts struct { + iterateTerms bool + allowFn allowFn +} + +func (o fieldsAndTermsIteratorOpts) allow(f []byte) bool { + if o.allowFn == nil { + return true + } + return o.allowFn(f) +} + +type allowFn func(field []byte) bool + +type fieldsAndTermsIter struct { + seg segment.Segment + opts fieldsAndTermsIteratorOpts + + err error + fieldIter segment.FieldsIterator + termIter segment.TermsIterator + + current struct { + field []byte + term []byte + } +} + +var ( + fieldsAndTermsIterZeroed fieldsAndTermsIter +) + +var _ fieldsAndTermsIterator = &fieldsAndTermsIter{} + +// newFieldsAndTermsIteratorFn is the lambda definition of the ctor for fieldsAndTermsIterator. +type newFieldsAndTermsIteratorFn func( + s segment.Segment, opts fieldsAndTermsIteratorOpts, +) (fieldsAndTermsIterator, error) + +func newFieldsAndTermsIterator(s segment.Segment, opts fieldsAndTermsIteratorOpts) (fieldsAndTermsIterator, error) { + iter := &fieldsAndTermsIter{} + err := iter.Reset(s, opts) + if err != nil { + return nil, err + } + return iter, nil +} + +func (fti *fieldsAndTermsIter) Reset(s segment.Segment, opts fieldsAndTermsIteratorOpts) error { + *fti = fieldsAndTermsIterZeroed + fti.seg = s + fti.opts = opts + if s == nil { + return nil + } + fiter, err := s.FieldsIterable().Fields() + if err != nil { + return err + } + fti.fieldIter = fiter + return nil +} + +func (fti *fieldsAndTermsIter) setNextField() bool { + fieldIter := fti.fieldIter + if fieldIter == nil { + return false + } + + for fieldIter.Next() { + field := fieldIter.Current() + if !fti.opts.allow(field) { + continue + } + fti.current.field = field + return true + } + + fti.err = fieldIter.Err() + return false +} + +func (fti *fieldsAndTermsIter) setNext() bool { + // check if current field has another term + if fti.termIter != nil { + if fti.termIter.Next() { + fti.current.term, _ = fti.termIter.Current() + return true + } + if err := fti.termIter.Err(); err != nil { + fti.err = err + return 
false
+	}
+	if err := fti.termIter.Close(); err != nil {
+	fti.err = err
+	return false
+	}
+	}
+
+	// i.e. need to switch to next field
+	hasNext := fti.setNextField()
+	if !hasNext {
+	return false
+	}
+
+	// and get next term for the field
+	termsIter, err := fti.seg.TermsIterable().Terms(fti.current.field)
+	if err != nil {
+	fti.err = err
+	return false
+	}
+	fti.termIter = termsIter
+
+	hasNext = fti.termIter.Next()
+	if !hasNext {
+	if err := fti.termIter.Err(); err != nil {
+	fti.err = err
+	return false
+	}
+	fti.termIter = nil
+	// i.e. no more terms for this field, should try the next one
+	return fti.setNext()
+	}
+
+	fti.current.term, _ = fti.termIter.Current()
+	return true
+}
+
+func (fti *fieldsAndTermsIter) Next() bool {
+	if fti.err != nil {
+	return false
+	}
+	// if only need to iterate fields
+	if !fti.opts.iterateTerms {
+	return fti.setNextField()
+	}
+	// iterating both fields and terms
+	return fti.setNext()
+}
+
+func (fti *fieldsAndTermsIter) Current() (field, term []byte) {
+	return fti.current.field, fti.current.term
+}
+
+func (fti *fieldsAndTermsIter) Err() error {
+	return fti.err
+}
+
+func (fti *fieldsAndTermsIter) Close() error {
+	var multiErr xerrors.MultiError
+	if fti.fieldIter != nil {
+	multiErr = multiErr.Add(fti.fieldIter.Close())
+	}
+	if fti.termIter != nil {
+	multiErr = multiErr.Add(fti.termIter.Close())
+	}
+	multiErr = multiErr.Add(fti.Reset(nil, fieldsAndTermsIteratorOpts{}))
+	return multiErr.FinalError()
+}
diff --git a/src/dbnode/storage/index/index_mock.go b/src/dbnode/storage/index/index_mock.go
index 268def6ad3..eb40eb016f 100644
--- a/src/dbnode/storage/index/index_mock.go
+++ b/src/dbnode/storage/index/index_mock.go
@@ -32,6 +32,7 @@ import (
 	"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
 	"github.com/m3db/m3/src/dbnode/storage/index/compaction"
 	"github.com/m3db/m3/src/m3ninx/doc"
+	"github.com/m3db/m3/src/m3ninx/index/segment"
 	"github.com/m3db/m3/src/m3ninx/index/segment/builder"
"github.com/m3db/m3/src/m3ninx/index/segment/fst" "github.com/m3db/m3/src/m3ninx/index/segment/mem" @@ -390,6 +391,34 @@ func (mr *MockAggregateResultsMockRecorder) Reset(nsID, aggregateQueryOpts inter return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockAggregateResults)(nil).Reset), nsID, aggregateQueryOpts) } +// AggregateResultsOptions mocks base method +func (m *MockAggregateResults) AggregateResultsOptions() AggregateResultsOptions { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AggregateResultsOptions") + ret0, _ := ret[0].(AggregateResultsOptions) + return ret0 +} + +// AggregateResultsOptions indicates an expected call of AggregateResultsOptions +func (mr *MockAggregateResultsMockRecorder) AggregateResultsOptions() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AggregateResultsOptions", reflect.TypeOf((*MockAggregateResults)(nil).AggregateResultsOptions)) +} + +// AddFields mocks base method +func (m *MockAggregateResults) AddFields(batch []AggregateResultsEntry) int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddFields", batch) + ret0, _ := ret[0].(int) + return ret0 +} + +// AddFields indicates an expected call of AddFields +func (mr *MockAggregateResultsMockRecorder) AddFields(batch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddFields", reflect.TypeOf((*MockAggregateResults)(nil).AddFields), batch) +} + // Map mocks base method func (m *MockAggregateResults) Map() *AggregateResultsMap { m.ctrl.T.Helper() @@ -654,6 +683,21 @@ func (mr *MockBlockMockRecorder) Query(cancellable, query, opts, results interfa return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockBlock)(nil).Query), cancellable, query, opts, results) } +// Aggregate mocks base method +func (m *MockBlock) Aggregate(cancellable *resource.CancellableLifetime, opts QueryOptions, results AggregateResults) (bool, error) 
{ + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Aggregate", cancellable, opts, results) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Aggregate indicates an expected call of Aggregate +func (mr *MockBlockMockRecorder) Aggregate(cancellable, opts, results interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aggregate", reflect.TypeOf((*MockBlock)(nil).Aggregate), cancellable, opts, results) +} + // AddResults mocks base method func (m *MockBlock) AddResults(results result.IndexBlock) error { m.ctrl.T.Helper() @@ -802,6 +846,100 @@ func (mr *MockBlockStatsReporterMockRecorder) ReportSegmentStats(stats interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportSegmentStats", reflect.TypeOf((*MockBlockStatsReporter)(nil).ReportSegmentStats), stats) } +// MockfieldsAndTermsIterator is a mock of fieldsAndTermsIterator interface +type MockfieldsAndTermsIterator struct { + ctrl *gomock.Controller + recorder *MockfieldsAndTermsIteratorMockRecorder +} + +// MockfieldsAndTermsIteratorMockRecorder is the mock recorder for MockfieldsAndTermsIterator +type MockfieldsAndTermsIteratorMockRecorder struct { + mock *MockfieldsAndTermsIterator +} + +// NewMockfieldsAndTermsIterator creates a new mock instance +func NewMockfieldsAndTermsIterator(ctrl *gomock.Controller) *MockfieldsAndTermsIterator { + mock := &MockfieldsAndTermsIterator{ctrl: ctrl} + mock.recorder = &MockfieldsAndTermsIteratorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockfieldsAndTermsIterator) EXPECT() *MockfieldsAndTermsIteratorMockRecorder { + return m.recorder +} + +// Next mocks base method +func (m *MockfieldsAndTermsIterator) Next() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Next") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Next indicates an expected call of Next +func (mr 
*MockfieldsAndTermsIteratorMockRecorder) Next() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockfieldsAndTermsIterator)(nil).Next)) +} + +// Current mocks base method +func (m *MockfieldsAndTermsIterator) Current() ([]byte, []byte) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Current") + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].([]byte) + return ret0, ret1 +} + +// Current indicates an expected call of Current +func (mr *MockfieldsAndTermsIteratorMockRecorder) Current() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Current", reflect.TypeOf((*MockfieldsAndTermsIterator)(nil).Current)) +} + +// Err mocks base method +func (m *MockfieldsAndTermsIterator) Err() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Err") + ret0, _ := ret[0].(error) + return ret0 +} + +// Err indicates an expected call of Err +func (mr *MockfieldsAndTermsIteratorMockRecorder) Err() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockfieldsAndTermsIterator)(nil).Err)) +} + +// Close mocks base method +func (m *MockfieldsAndTermsIterator) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close +func (mr *MockfieldsAndTermsIteratorMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockfieldsAndTermsIterator)(nil).Close)) +} + +// Reset mocks base method +func (m *MockfieldsAndTermsIterator) Reset(seg segment.Segment, opts fieldsAndTermsIteratorOpts) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reset", seg, opts) + ret0, _ := ret[0].(error) + return ret0 +} + +// Reset indicates an expected call of Reset +func (mr *MockfieldsAndTermsIteratorMockRecorder) Reset(seg, opts 
interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockfieldsAndTermsIterator)(nil).Reset), seg, opts) +} + // MockOptions is a mock of Options interface type MockOptions struct { ctrl *gomock.Controller @@ -1175,6 +1313,34 @@ func (mr *MockOptionsMockRecorder) DocumentArrayPool() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DocumentArrayPool", reflect.TypeOf((*MockOptions)(nil).DocumentArrayPool)) } +// SetAggregateResultsEntryArrayPool mocks base method +func (m *MockOptions) SetAggregateResultsEntryArrayPool(value AggregateResultsEntryArrayPool) Options { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetAggregateResultsEntryArrayPool", value) + ret0, _ := ret[0].(Options) + return ret0 +} + +// SetAggregateResultsEntryArrayPool indicates an expected call of SetAggregateResultsEntryArrayPool +func (mr *MockOptionsMockRecorder) SetAggregateResultsEntryArrayPool(value interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAggregateResultsEntryArrayPool", reflect.TypeOf((*MockOptions)(nil).SetAggregateResultsEntryArrayPool), value) +} + +// AggregateResultsEntryArrayPool mocks base method +func (m *MockOptions) AggregateResultsEntryArrayPool() AggregateResultsEntryArrayPool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AggregateResultsEntryArrayPool") + ret0, _ := ret[0].(AggregateResultsEntryArrayPool) + return ret0 +} + +// AggregateResultsEntryArrayPool indicates an expected call of AggregateResultsEntryArrayPool +func (mr *MockOptionsMockRecorder) AggregateResultsEntryArrayPool() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AggregateResultsEntryArrayPool", reflect.TypeOf((*MockOptions)(nil).AggregateResultsEntryArrayPool)) +} + // SetForegroundCompactionPlannerOptions mocks base method func (m *MockOptions) 
SetForegroundCompactionPlannerOptions(v compaction.PlannerOptions) Options { m.ctrl.T.Helper() diff --git a/src/dbnode/storage/index/options.go b/src/dbnode/storage/index/options.go index 0a0054d70f..07f505adbf 100644 --- a/src/dbnode/storage/index/options.go +++ b/src/dbnode/storage/index/options.go @@ -45,16 +45,26 @@ const ( documentArrayPoolSize = 256 documentArrayPoolCapacity = 256 documentArrayPoolMaxCapacity = 256 // Do not allow grows, since we know the size + + // aggregateResultsEntryArrayPool size in general: 256*256*sizeof(doc.Field) + // = 256 * 256 * 16 + // = 1mb (but with Go's heap probably 2mb) + // TODO(prateek): Make this configurable in a followup change. + aggregateResultsEntryArrayPoolSize = 256 + aggregateResultsEntryArrayPoolCapacity = 256 + aggregateResultsEntryArrayPoolMaxCapacity = 256 // Do not allow grows, since we know the size ) var ( - errOptionsIdentifierPoolUnspecified = errors.New("identifier pool is unset") - errOptionsBytesPoolUnspecified = errors.New("checkedbytes pool is unset") - errOptionsResultsPoolUnspecified = errors.New("results pool is unset") - errOptionsAggResultsPoolUnspecified = errors.New("aggregate results pool is unset") - errOptionsAggValuesPoolUnspecified = errors.New("aggregate values pool is unset") - errIDGenerationDisabled = errors.New("id generation is disabled") - errPostingsListCacheUnspecified = errors.New("postings list cache is unset") + errOptionsIdentifierPoolUnspecified = errors.New("identifier pool is unset") + errOptionsBytesPoolUnspecified = errors.New("checkedbytes pool is unset") + errOptionsResultsPoolUnspecified = errors.New("results pool is unset") + errOptionsAggResultsPoolUnspecified = errors.New("aggregate results pool is unset") + errOptionsAggValuesPoolUnspecified = errors.New("aggregate values pool is unset") + errOptionsDocPoolUnspecified = errors.New("docs array pool is unset") + errOptionsAggResultsEntryPoolUnspecified = errors.New("aggregate results entry array pool is unset") + 
errIDGenerationDisabled = errors.New("id generation is disabled") + errPostingsListCacheUnspecified = errors.New("postings list cache is unset") defaultForegroundCompactionOpts compaction.PlannerOptions defaultBackgroundCompactionOpts compaction.PlannerOptions @@ -102,6 +112,7 @@ type opts struct { aggResultsPool AggregateResultsPool aggValuesPool AggregateValuesPool docArrayPool doc.DocumentArrayPool + aggResultsEntryArrayPool AggregateResultsEntryArrayPool foregroundCompactionPlannerOpts compaction.PlannerOptions backgroundCompactionPlannerOpts compaction.PlannerOptions postingsListCache *PostingsListCache @@ -131,6 +142,14 @@ func NewOptions() Options { }) docArrayPool.Init() + aggResultsEntryArrayPool := NewAggregateResultsEntryArrayPool(AggregateResultsEntryArrayPoolOpts{ + Options: pool.NewObjectPoolOptions(). + SetSize(aggregateResultsEntryArrayPoolSize), + Capacity: aggregateResultsEntryArrayPoolCapacity, + MaxCapacity: aggregateResultsEntryArrayPoolMaxCapacity, + }) + aggResultsEntryArrayPool.Init() + instrumentOpts := instrument.NewOptions() opts := &opts{ insertMode: defaultIndexInsertMode, @@ -145,6 +164,7 @@ func NewOptions() Options { aggResultsPool: aggResultsPool, aggValuesPool: aggValuesPool, docArrayPool: docArrayPool, + aggResultsEntryArrayPool: aggResultsEntryArrayPool, foregroundCompactionPlannerOpts: defaultForegroundCompactionOpts, backgroundCompactionPlannerOpts: defaultBackgroundCompactionOpts, } @@ -171,9 +191,15 @@ func (o *opts) Validate() error { if o.aggResultsPool == nil { return errOptionsAggResultsPoolUnspecified } - if o.aggResultsPool == nil { + if o.aggValuesPool == nil { return errOptionsAggValuesPoolUnspecified } + if o.docArrayPool == nil { + return errOptionsDocPoolUnspecified + } + if o.aggResultsEntryArrayPool == nil { + return errOptionsAggResultsEntryPoolUnspecified + } if o.postingsListCache == nil { return errPostingsListCacheUnspecified } @@ -304,6 +330,16 @@ func (o *opts) DocumentArrayPool() doc.DocumentArrayPool { 
return o.docArrayPool } +func (o *opts) SetAggregateResultsEntryArrayPool(value AggregateResultsEntryArrayPool) Options { + opts := *o + opts.aggResultsEntryArrayPool = value + return &opts +} + +func (o *opts) AggregateResultsEntryArrayPool() AggregateResultsEntryArrayPool { + return o.aggResultsEntryArrayPool +} + func (o *opts) SetForegroundCompactionPlannerOptions(value compaction.PlannerOptions) Options { opts := *o opts.foregroundCompactionPlannerOpts = value diff --git a/src/dbnode/storage/index/types.go b/src/dbnode/storage/index/types.go index 94ed466e03..c1fae3ee2d 100644 --- a/src/dbnode/storage/index/types.go +++ b/src/dbnode/storage/index/types.go @@ -30,14 +30,15 @@ import ( "github.com/m3db/m3/src/dbnode/storage/index/compaction" "github.com/m3db/m3/src/m3ninx/doc" "github.com/m3db/m3/src/m3ninx/idx" + "github.com/m3db/m3/src/m3ninx/index/segment" "github.com/m3db/m3/src/m3ninx/index/segment/builder" "github.com/m3db/m3/src/m3ninx/index/segment/fst" "github.com/m3db/m3/src/m3ninx/index/segment/mem" - "github.com/m3db/m3/src/x/resource" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" "github.com/m3db/m3/src/x/instrument" "github.com/m3db/m3/src/x/pool" + "github.com/m3db/m3/src/x/resource" xtime "github.com/m3db/m3/src/x/time" ) @@ -181,6 +182,16 @@ type AggregateResults interface { aggregateQueryOpts AggregateResultsOptions, ) + // AggregateResultsOptions returns the options for this AggregateResult. + AggregateResultsOptions() AggregateResultsOptions + + // AddFields adds the batch of fields to the results set, it will + // assume ownership of the idents (and backing bytes) provided to it. + // i.e. it is not safe to use/modify the idents once this function returns. + AddFields( + batch []AggregateResultsEntry, + ) (size int) + // Map returns a map from tag name -> possible tag values, // comprising aggregate results. 
// Since a lock is not held when accessing the map after a call to this @@ -238,6 +249,13 @@ type AggregateValuesPool interface { Put(value AggregateValues) } +// AggregateResultsEntry is used during block.Aggregate() execution +// to collect entries. +type AggregateResultsEntry struct { + Field ident.ID + Terms []ident.ID +} + // OnIndexSeries provides a set of callback hooks to allow the reverse index // to do lifecycle management of any resources retained during indexing. type OnIndexSeries interface { @@ -273,7 +291,16 @@ type Block interface { results BaseResults, ) (exhaustive bool, err error) - // AddResults adds bootstrap results to the block, if c. + // Aggregate aggregates known tag names/values. + // NB(prateek): different from aggregating by means of Query, as we can + // avoid going to documents, relying purely on the indexed FSTs. + Aggregate( + cancellable *resource.CancellableLifetime, + opts QueryOptions, + results AggregateResults, + ) (exhaustive bool, err error) + + // AddResults adds bootstrap results to the block. AddResults(results result.IndexBlock) error // Tick does internal house keeping operations. @@ -698,6 +725,25 @@ func (e WriteBatchEntry) Result() WriteBatchEntryResult { return *e.result } +// fieldsAndTermsIterator iterates over all known fields and terms for a segment. +type fieldsAndTermsIterator interface { + // Next returns a bool indicating if there are any more elements. + Next() bool + + // Current returns the current element. + // NB: the element returned is only valid until the subsequent call to Next(). + Current() (field, term []byte) + + // Err returns any errors encountered during iteration. + Err() error + + // Close releases any resources held by the iterator. + Close() error + + // Reset resets the iterator to the start iterating the given segment. + Reset(seg segment.Segment, opts fieldsAndTermsIteratorOpts) error +} + // Options control the Indexing knobs. 
type Options interface { // Validate validates assumptions baked into the code. @@ -775,6 +821,12 @@ type Options interface { // DocumentArrayPool returns the document array pool. DocumentArrayPool() doc.DocumentArrayPool + // SetAggregateResultsEntryArrayPool sets the aggregate results entry array pool. + SetAggregateResultsEntryArrayPool(value AggregateResultsEntryArrayPool) Options + + // AggregateResultsEntryArrayPool returns the aggregate results entry array pool. + AggregateResultsEntryArrayPool() AggregateResultsEntryArrayPool + // SetForegroundCompactionPlannerOptions sets the compaction planner options. SetForegroundCompactionPlannerOptions(v compaction.PlannerOptions) Options diff --git a/src/dbnode/storage/index_block_test.go b/src/dbnode/storage/index_block_test.go index 946366c1b6..a32dd6a44e 100644 --- a/src/dbnode/storage/index_block_test.go +++ b/src/dbnode/storage/index_block_test.go @@ -30,6 +30,7 @@ import ( "github.com/m3db/m3/src/dbnode/storage/index" "github.com/m3db/m3/src/dbnode/storage/namespace" "github.com/m3db/m3/src/m3ninx/doc" + "github.com/m3db/m3/src/m3ninx/idx" "github.com/m3db/m3/src/m3ninx/index/segment" "github.com/m3db/m3/src/x/context" "github.com/m3db/m3/src/x/ident" @@ -685,6 +686,7 @@ func TestNamespaceIndexBlockAggregateQuery(t *testing.T) { ctrl := gomock.NewController(xtest.Reporter{T: t}) defer ctrl.Finish() + query := idx.NewFieldQuery([]byte("a")) retention := 2 * time.Hour blockSize := time.Hour now := time.Now().Truncate(blockSize).Add(10 * time.Minute) @@ -748,7 +750,8 @@ func TestNamespaceIndexBlockAggregateQuery(t *testing.T) { // only queries as much as is needed (wrt to time) ctx := context.NewContext() - q := index.Query{} + + q := index.Query{query} qOpts := index.QueryOptions{ StartInclusive: t0, EndExclusive: now.Add(time.Minute), @@ -802,6 +805,7 @@ func TestNamespaceIndexBlockAggregateQueryReleasingContext(t *testing.T) { opts := testDatabaseOptions() opts = 
opts.SetClockOptions(opts.ClockOptions().SetNowFn(nowFn)) + query := idx.NewFieldQuery([]byte("a")) b0 := index.NewMockBlock(ctrl) b0.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes() b0.EXPECT().Close().Return(nil) @@ -858,7 +862,7 @@ func TestNamespaceIndexBlockAggregateQueryReleasingContext(t *testing.T) { // only queries as much as is needed (wrt to time) ctx := context.NewContext() - q := index.Query{} + q := index.Query{query} qOpts := index.QueryOptions{ StartInclusive: t0, EndExclusive: now.Add(time.Minute), @@ -874,3 +878,105 @@ func TestNamespaceIndexBlockAggregateQueryReleasingContext(t *testing.T) { require.NoError(t, err) ctx.BlockingClose() } + +func TestNamespaceIndexBlockAggregateQueryWithAllQuery(t *testing.T) { + ctrl := gomock.NewController(xtest.Reporter{T: t}) + defer ctrl.Finish() + + query := idx.NewAllQuery() + retention := 2 * time.Hour + blockSize := time.Hour + now := time.Now().Truncate(blockSize).Add(10 * time.Minute) + t0 := now.Truncate(blockSize) + t0Nanos := xtime.ToUnixNano(t0) + t1 := t0.Add(1 * blockSize) + t1Nanos := xtime.ToUnixNano(t1) + t2 := t1.Add(1 * blockSize) + var nowLock sync.Mutex + nowFn := func() time.Time { + nowLock.Lock() + defer nowLock.Unlock() + return now + } + opts := testDatabaseOptions() + opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(nowFn)) + + b0 := index.NewMockBlock(ctrl) + b0.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes() + b0.EXPECT().Close().Return(nil) + b0.EXPECT().StartTime().Return(t0).AnyTimes() + b0.EXPECT().EndTime().Return(t1).AnyTimes() + b1 := index.NewMockBlock(ctrl) + b1.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes() + b1.EXPECT().Close().Return(nil) + b1.EXPECT().StartTime().Return(t1).AnyTimes() + b1.EXPECT().EndTime().Return(t2).AnyTimes() + newBlockFn := func( + ts time.Time, + md namespace.Metadata, + _ index.BlockOptions, + io index.Options, + ) (index.Block, error) { + if ts.Equal(t0) { + return b0, nil + } + if ts.Equal(t1) { + return b1, nil + } + 
panic("should never get here") + } + md := testNamespaceMetadata(blockSize, retention) + idx, err := newNamespaceIndexWithNewBlockFn(md, newBlockFn, opts) + require.NoError(t, err) + + defer func() { + require.NoError(t, idx.Close()) + }() + + seg1 := segment.NewMockSegment(ctrl) + seg2 := segment.NewMockSegment(ctrl) + seg3 := segment.NewMockSegment(ctrl) + bootstrapResults := result.IndexResults{ + t0Nanos: result.NewIndexBlock(t0, []segment.Segment{seg1}, result.NewShardTimeRanges(t0, t1, 1, 2, 3)), + t1Nanos: result.NewIndexBlock(t1, []segment.Segment{seg2, seg3}, result.NewShardTimeRanges(t1, t2, 1, 2, 3)), + } + + b0.EXPECT().AddResults(bootstrapResults[t0Nanos]).Return(nil) + b1.EXPECT().AddResults(bootstrapResults[t1Nanos]).Return(nil) + require.NoError(t, idx.Bootstrap(bootstrapResults)) + + // only queries as much as is needed (wrt to time) + ctx := context.NewContext() + + q := index.Query{query} + qOpts := index.QueryOptions{ + StartInclusive: t0, + EndExclusive: now.Add(time.Minute), + } + aggOpts := index.AggregationOptions{QueryOptions: qOpts} + + b0.EXPECT().Aggregate(gomock.Any(), qOpts, gomock.Any()).Return(true, nil) + _, err = idx.AggregateQuery(ctx, q, aggOpts) + require.NoError(t, err) + + // queries multiple blocks if needed + qOpts = index.QueryOptions{ + StartInclusive: t0, + EndExclusive: t2.Add(time.Minute), + } + aggOpts = index.AggregationOptions{QueryOptions: qOpts} + b0.EXPECT().Aggregate(gomock.Any(), qOpts, gomock.Any()).Return(true, nil) + b1.EXPECT().Aggregate(gomock.Any(), qOpts, gomock.Any()).Return(true, nil) + _, err = idx.AggregateQuery(ctx, q, aggOpts) + require.NoError(t, err) + + // stops querying once a block returns non-exhaustive + qOpts = index.QueryOptions{ + StartInclusive: t0, + EndExclusive: t0.Add(time.Minute), + } + b0.EXPECT().Aggregate(gomock.Any(), qOpts, gomock.Any()).Return(false, nil) + aggOpts = index.AggregationOptions{QueryOptions: qOpts} + _, err = idx.AggregateQuery(ctx, q, aggOpts) + 
require.NoError(t, err) +} diff --git a/src/m3ninx/generated-source-files.mk b/src/m3ninx/generated-source-files.mk index 5b9bc0dd51..980fa83c3b 100644 --- a/src/m3ninx/generated-source-files.mk +++ b/src/m3ninx/generated-source-files.mk @@ -98,9 +98,9 @@ genny-map-segment-mem-fieldsmap: # generation rule for all generated arraypools .PHONY: genny-arraypool-all -genny-arraypool-all: \ - genny-arraypool-bytes-slice-array-pool \ - genny-arraypool-document-array-pool \ +genny-arraypool-all: \ + genny-arraypool-bytes-slice-array-pool \ + genny-arraypool-document-array-pool \ # arraypool generation rule for ./x/bytes.SliceArrayPool .PHONY: genny-arraypool-bytes-slice-array-pool @@ -114,7 +114,7 @@ genny-arraypool-bytes-slice-array-pool: rename_type_middle=Slice \ rename_constructor=NewSliceArrayPool \ - # arraypool generation rule for ./doc.DocumentArrayPool +# arraypool generation rule for ./doc.DocumentArrayPool .PHONY: genny-arraypool-document-array-pool genny-arraypool-document-array-pool: cd $(m3x_package_path) && make genny-arraypool \ @@ -127,4 +127,3 @@ genny-arraypool-document-array-pool: rename_constructor=NewDocumentArrayPool \ rename_gen_types=true \ - diff --git a/src/m3ninx/generated/mocks/generate.go b/src/m3ninx/generated/mocks/generate.go index 578a5b2db9..33756ac9a4 100644 --- a/src/m3ninx/generated/mocks/generate.go +++ b/src/m3ninx/generated/mocks/generate.go @@ -25,9 +25,9 @@ package mocks //go:generate sh -c "mockgen -package=doc -destination=$GOPATH/src/github.com/m3db/m3/src/m3ninx/doc/doc_mock.go -source=$GOPATH/src/github.com/m3db/m3/src/m3ninx/doc/types.go" //go:generate sh -c "mockgen -package=search -destination=$GOPATH/src/github.com/m3db/m3/src/m3ninx/search/search_mock.go -source=$GOPATH/src/github.com/m3db/m3/src/m3ninx/search/types.go" //go:generate sh -c "mockgen -package=persist -destination=$GOPATH/src/github.com/m3db/m3/src/m3ninx/persist/persist_mock.go -source=$GOPATH/src/github.com/m3db/m3/src/m3ninx/persist/types.go" 
+//go:generate sh -c "mockgen -package=segment -destination=$GOPATH/src/github.com/m3db/m3/src/m3ninx/index/segment/segment_mock.go -source=$GOPATH/src/github.com/m3db/m3/src/m3ninx/index/segment/types.go" // mockgen rules for generating mocks (reflection mode) //go:generate sh -c "mockgen -package=mem -destination=$GOPATH/src/github.com/m3db/m3/src/m3ninx/index/segment/mem/mem_mock.go github.com/m3db/m3/src/m3ninx/index/segment/mem ReadableSegment" //go:generate sh -c "mockgen -package=fst -destination=$GOPATH/src/github.com/m3db/m3/src/m3ninx/index/segment/fst/fst_mock.go github.com/m3db/m3/src/m3ninx/index/segment/fst Writer,Segment" -//go:generate sh -c "mockgen -package=segment -destination=$GOPATH/src/github.com/m3db/m3/src/m3ninx/index/segment/segment_mock.go github.com/m3db/m3/src/m3ninx/index/segment Segment,MutableSegment,Builder" //go:generate sh -c "mockgen -package=index -destination=$GOPATH/src/github.com/m3db/m3/src/m3ninx/index/index_mock.go github.com/m3db/m3/src/m3ninx/index Reader,DocRetriever" diff --git a/src/m3ninx/index/segment/fst/fst_terms_iterator.go b/src/m3ninx/index/segment/fst/fst_terms_iterator.go index f6d714c6ea..d280bb5d5c 100644 --- a/src/m3ninx/index/segment/fst/fst_terms_iterator.go +++ b/src/m3ninx/index/segment/fst/fst_terms_iterator.go @@ -32,7 +32,7 @@ type fstTermsIterOpts struct { } func (o fstTermsIterOpts) Close() error { - if o.finalizeFST { + if o.finalizeFST && o.fst != nil { return o.fst.Close() } return nil @@ -120,7 +120,6 @@ func (f *fstTermsIter) Close() error { var multiErr xerrors.MultiError multiErr = multiErr.Add(f.iter.Close()) multiErr = multiErr.Add(f.opts.Close()) - f.clear() return multiErr.FinalError() } diff --git a/src/m3ninx/index/segment/fst/fst_terms_postings_iterator.go b/src/m3ninx/index/segment/fst/fst_terms_postings_iterator.go index 7989f5f244..09cf5bb58b 100644 --- a/src/m3ninx/index/segment/fst/fst_terms_postings_iterator.go +++ b/src/m3ninx/index/segment/fst/fst_terms_postings_iterator.go 
@@ -108,7 +108,10 @@ func (f *fstTermsPostingsIter) Err() error { } func (f *fstTermsPostingsIter) Close() error { - err := f.termsIter.Close() + var err error + if f.termsIter != nil { + err = f.termsIter.Close() + } f.clear() return err } diff --git a/src/m3ninx/index/segment/segment_mock.go b/src/m3ninx/index/segment/segment_mock.go index c37f3ea95a..0609417bcd 100644 --- a/src/m3ninx/index/segment/segment_mock.go +++ b/src/m3ninx/index/segment/segment_mock.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/m3db/m3/src/m3ninx/index/segment (interfaces: Segment,MutableSegment,Builder) +// Source: github.com/m3db/m3/src/m3ninx/index/segment/types.go // Copyright (c) 2019 Uber Technologies, Inc. // @@ -57,35 +57,6 @@ func (m *MockSegment) EXPECT() *MockSegmentMockRecorder { return m.recorder } -// Close mocks base method -func (m *MockSegment) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close -func (mr *MockSegmentMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockSegment)(nil).Close)) -} - -// ContainsID mocks base method -func (m *MockSegment) ContainsID(arg0 []byte) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ContainsID", arg0) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ContainsID indicates an expected call of ContainsID -func (mr *MockSegmentMockRecorder) ContainsID(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainsID", reflect.TypeOf((*MockSegment)(nil).ContainsID), arg0) -} - // FieldsIterable mocks base method func (m *MockSegment) FieldsIterable() FieldsIterable { m.ctrl.T.Helper() @@ -100,19 +71,18 @@ func (mr *MockSegmentMockRecorder) FieldsIterable() *gomock.Call { return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FieldsIterable", reflect.TypeOf((*MockSegment)(nil).FieldsIterable)) } -// Reader mocks base method -func (m *MockSegment) Reader() (index.Reader, error) { +// TermsIterable mocks base method +func (m *MockSegment) TermsIterable() TermsIterable { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Reader") - ret0, _ := ret[0].(index.Reader) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "TermsIterable") + ret0, _ := ret[0].(TermsIterable) + return ret0 } -// Reader indicates an expected call of Reader -func (mr *MockSegmentMockRecorder) Reader() *gomock.Call { +// TermsIterable indicates an expected call of TermsIterable +func (mr *MockSegmentMockRecorder) TermsIterable() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockSegment)(nil).Reader)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TermsIterable", reflect.TypeOf((*MockSegment)(nil).TermsIterable)) } // Size mocks base method @@ -129,60 +99,38 @@ func (mr *MockSegmentMockRecorder) Size() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockSegment)(nil).Size)) } -// TermsIterable mocks base method -func (m *MockSegment) TermsIterable() TermsIterable { +// ContainsID mocks base method +func (m *MockSegment) ContainsID(docID []byte) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TermsIterable") - ret0, _ := ret[0].(TermsIterable) - return ret0 + ret := m.ctrl.Call(m, "ContainsID", docID) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 } -// TermsIterable indicates an expected call of TermsIterable -func (mr *MockSegmentMockRecorder) TermsIterable() *gomock.Call { +// ContainsID indicates an expected call of ContainsID +func (mr *MockSegmentMockRecorder) ContainsID(docID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
"TermsIterable", reflect.TypeOf((*MockSegment)(nil).TermsIterable)) -} - -// MockMutableSegment is a mock of MutableSegment interface -type MockMutableSegment struct { - ctrl *gomock.Controller - recorder *MockMutableSegmentMockRecorder -} - -// MockMutableSegmentMockRecorder is the mock recorder for MockMutableSegment -type MockMutableSegmentMockRecorder struct { - mock *MockMutableSegment -} - -// NewMockMutableSegment creates a new mock instance -func NewMockMutableSegment(ctrl *gomock.Controller) *MockMutableSegment { - mock := &MockMutableSegment{ctrl: ctrl} - mock.recorder = &MockMutableSegmentMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockMutableSegment) EXPECT() *MockMutableSegmentMockRecorder { - return m.recorder + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainsID", reflect.TypeOf((*MockSegment)(nil).ContainsID), docID) } -// AllDocs mocks base method -func (m *MockMutableSegment) AllDocs() (index.IDDocIterator, error) { +// Reader mocks base method +func (m *MockSegment) Reader() (index.Reader, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AllDocs") - ret0, _ := ret[0].(index.IDDocIterator) + ret := m.ctrl.Call(m, "Reader") + ret0, _ := ret[0].(index.Reader) ret1, _ := ret[1].(error) return ret0, ret1 } -// AllDocs indicates an expected call of AllDocs -func (mr *MockMutableSegmentMockRecorder) AllDocs() *gomock.Call { +// Reader indicates an expected call of Reader +func (mr *MockSegmentMockRecorder) Reader() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllDocs", reflect.TypeOf((*MockMutableSegment)(nil).AllDocs)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockSegment)(nil).Reader)) } // Close mocks base method -func (m *MockMutableSegment) Close() error { +func (m *MockSegment) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") ret0, _ := 
ret[0].(error) @@ -190,42 +138,36 @@ func (m *MockMutableSegment) Close() error { } // Close indicates an expected call of Close -func (mr *MockMutableSegmentMockRecorder) Close() *gomock.Call { +func (mr *MockSegmentMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockMutableSegment)(nil).Close)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockSegment)(nil).Close)) } -// ContainsID mocks base method -func (m *MockMutableSegment) ContainsID(arg0 []byte) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ContainsID", arg0) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 +// MockFieldsIterable is a mock of FieldsIterable interface +type MockFieldsIterable struct { + ctrl *gomock.Controller + recorder *MockFieldsIterableMockRecorder } -// ContainsID indicates an expected call of ContainsID -func (mr *MockMutableSegmentMockRecorder) ContainsID(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainsID", reflect.TypeOf((*MockMutableSegment)(nil).ContainsID), arg0) +// MockFieldsIterableMockRecorder is the mock recorder for MockFieldsIterable +type MockFieldsIterableMockRecorder struct { + mock *MockFieldsIterable } -// Docs mocks base method -func (m *MockMutableSegment) Docs() []doc.Document { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Docs") - ret0, _ := ret[0].([]doc.Document) - return ret0 +// NewMockFieldsIterable creates a new mock instance +func NewMockFieldsIterable(ctrl *gomock.Controller) *MockFieldsIterable { + mock := &MockFieldsIterable{ctrl: ctrl} + mock.recorder = &MockFieldsIterableMockRecorder{mock} + return mock } -// Docs indicates an expected call of Docs -func (mr *MockMutableSegmentMockRecorder) Docs() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Docs", 
reflect.TypeOf((*MockMutableSegment)(nil).Docs)) +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockFieldsIterable) EXPECT() *MockFieldsIterableMockRecorder { + return m.recorder } // Fields mocks base method -func (m *MockMutableSegment) Fields() (FieldsIterator, error) { +func (m *MockFieldsIterable) Fields() (FieldsIterator, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Fields") ret0, _ := ret[0].(FieldsIterator) @@ -234,202 +176,601 @@ func (m *MockMutableSegment) Fields() (FieldsIterator, error) { } // Fields indicates an expected call of Fields -func (mr *MockMutableSegmentMockRecorder) Fields() *gomock.Call { +func (mr *MockFieldsIterableMockRecorder) Fields() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockMutableSegment)(nil).Fields)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockFieldsIterable)(nil).Fields)) } -// FieldsIterable mocks base method -func (m *MockMutableSegment) FieldsIterable() FieldsIterable { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FieldsIterable") - ret0, _ := ret[0].(FieldsIterable) - return ret0 +// MockTermsIterable is a mock of TermsIterable interface +type MockTermsIterable struct { + ctrl *gomock.Controller + recorder *MockTermsIterableMockRecorder } -// FieldsIterable indicates an expected call of FieldsIterable -func (mr *MockMutableSegmentMockRecorder) FieldsIterable() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FieldsIterable", reflect.TypeOf((*MockMutableSegment)(nil).FieldsIterable)) +// MockTermsIterableMockRecorder is the mock recorder for MockTermsIterable +type MockTermsIterableMockRecorder struct { + mock *MockTermsIterable } -// Insert mocks base method -func (m *MockMutableSegment) Insert(arg0 doc.Document) ([]byte, error) { +// NewMockTermsIterable creates a new mock instance +func 
NewMockTermsIterable(ctrl *gomock.Controller) *MockTermsIterable { + mock := &MockTermsIterable{ctrl: ctrl} + mock.recorder = &MockTermsIterableMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTermsIterable) EXPECT() *MockTermsIterableMockRecorder { + return m.recorder +} + +// Terms mocks base method +func (m *MockTermsIterable) Terms(field []byte) (TermsIterator, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Insert", arg0) - ret0, _ := ret[0].([]byte) + ret := m.ctrl.Call(m, "Terms", field) + ret0, _ := ret[0].(TermsIterator) ret1, _ := ret[1].(error) return ret0, ret1 } -// Insert indicates an expected call of Insert -func (mr *MockMutableSegmentMockRecorder) Insert(arg0 interface{}) *gomock.Call { +// Terms indicates an expected call of Terms +func (mr *MockTermsIterableMockRecorder) Terms(field interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockMutableSegment)(nil).Insert), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terms", reflect.TypeOf((*MockTermsIterable)(nil).Terms), field) } -// InsertBatch mocks base method -func (m *MockMutableSegment) InsertBatch(arg0 index.Batch) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "InsertBatch", arg0) - ret0, _ := ret[0].(error) - return ret0 +// MockOrderedBytesIterator is a mock of OrderedBytesIterator interface +type MockOrderedBytesIterator struct { + ctrl *gomock.Controller + recorder *MockOrderedBytesIteratorMockRecorder } -// InsertBatch indicates an expected call of InsertBatch -func (mr *MockMutableSegmentMockRecorder) InsertBatch(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertBatch", reflect.TypeOf((*MockMutableSegment)(nil).InsertBatch), arg0) +// MockOrderedBytesIteratorMockRecorder is the mock recorder for MockOrderedBytesIterator +type 
MockOrderedBytesIteratorMockRecorder struct { + mock *MockOrderedBytesIterator } -// IsSealed mocks base method -func (m *MockMutableSegment) IsSealed() bool { +// NewMockOrderedBytesIterator creates a new mock instance +func NewMockOrderedBytesIterator(ctrl *gomock.Controller) *MockOrderedBytesIterator { + mock := &MockOrderedBytesIterator{ctrl: ctrl} + mock.recorder = &MockOrderedBytesIteratorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOrderedBytesIterator) EXPECT() *MockOrderedBytesIteratorMockRecorder { + return m.recorder +} + +// Next mocks base method +func (m *MockOrderedBytesIterator) Next() bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsSealed") + ret := m.ctrl.Call(m, "Next") ret0, _ := ret[0].(bool) return ret0 } -// IsSealed indicates an expected call of IsSealed -func (mr *MockMutableSegmentMockRecorder) IsSealed() *gomock.Call { +// Next indicates an expected call of Next +func (mr *MockOrderedBytesIteratorMockRecorder) Next() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSealed", reflect.TypeOf((*MockMutableSegment)(nil).IsSealed)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockOrderedBytesIterator)(nil).Next)) } -// Offset mocks base method -func (m *MockMutableSegment) Offset() postings.ID { +// Current mocks base method +func (m *MockOrderedBytesIterator) Current() []byte { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Offset") - ret0, _ := ret[0].(postings.ID) + ret := m.ctrl.Call(m, "Current") + ret0, _ := ret[0].([]byte) return ret0 } -// Offset indicates an expected call of Offset -func (mr *MockMutableSegmentMockRecorder) Offset() *gomock.Call { +// Current indicates an expected call of Current +func (mr *MockOrderedBytesIteratorMockRecorder) Current() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Offset", 
reflect.TypeOf((*MockMutableSegment)(nil).Offset)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Current", reflect.TypeOf((*MockOrderedBytesIterator)(nil).Current)) } -// Reader mocks base method -func (m *MockMutableSegment) Reader() (index.Reader, error) { +// Err mocks base method +func (m *MockOrderedBytesIterator) Err() error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Reader") - ret0, _ := ret[0].(index.Reader) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "Err") + ret0, _ := ret[0].(error) + return ret0 } -// Reader indicates an expected call of Reader -func (mr *MockMutableSegmentMockRecorder) Reader() *gomock.Call { +// Err indicates an expected call of Err +func (mr *MockOrderedBytesIteratorMockRecorder) Err() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockMutableSegment)(nil).Reader)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockOrderedBytesIterator)(nil).Err)) } -// Reset mocks base method -func (m *MockMutableSegment) Reset(arg0 postings.ID) { +// Close mocks base method +func (m *MockOrderedBytesIterator) Close() error { m.ctrl.T.Helper() - m.ctrl.Call(m, "Reset", arg0) + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 } -// Reset indicates an expected call of Reset -func (mr *MockMutableSegmentMockRecorder) Reset(arg0 interface{}) *gomock.Call { +// Close indicates an expected call of Close +func (mr *MockOrderedBytesIteratorMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockMutableSegment)(nil).Reset), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockOrderedBytesIterator)(nil).Close)) } -// Seal mocks base method -func (m *MockMutableSegment) Seal() error { +// MockFieldsIterator is a mock of FieldsIterator interface +type 
MockFieldsIterator struct { + ctrl *gomock.Controller + recorder *MockFieldsIteratorMockRecorder +} + +// MockFieldsIteratorMockRecorder is the mock recorder for MockFieldsIterator +type MockFieldsIteratorMockRecorder struct { + mock *MockFieldsIterator +} + +// NewMockFieldsIterator creates a new mock instance +func NewMockFieldsIterator(ctrl *gomock.Controller) *MockFieldsIterator { + mock := &MockFieldsIterator{ctrl: ctrl} + mock.recorder = &MockFieldsIteratorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockFieldsIterator) EXPECT() *MockFieldsIteratorMockRecorder { + return m.recorder +} + +// Next mocks base method +func (m *MockFieldsIterator) Next() bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Seal") - ret0, _ := ret[0].(error) + ret := m.ctrl.Call(m, "Next") + ret0, _ := ret[0].(bool) return ret0 } -// Seal indicates an expected call of Seal -func (mr *MockMutableSegmentMockRecorder) Seal() *gomock.Call { +// Next indicates an expected call of Next +func (mr *MockFieldsIteratorMockRecorder) Next() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Seal", reflect.TypeOf((*MockMutableSegment)(nil).Seal)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockFieldsIterator)(nil).Next)) } -// Size mocks base method -func (m *MockMutableSegment) Size() int64 { +// Current mocks base method +func (m *MockFieldsIterator) Current() []byte { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Size") - ret0, _ := ret[0].(int64) + ret := m.ctrl.Call(m, "Current") + ret0, _ := ret[0].([]byte) return ret0 } -// Size indicates an expected call of Size -func (mr *MockMutableSegmentMockRecorder) Size() *gomock.Call { +// Current indicates an expected call of Current +func (mr *MockFieldsIteratorMockRecorder) Current() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", 
reflect.TypeOf((*MockMutableSegment)(nil).Size)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Current", reflect.TypeOf((*MockFieldsIterator)(nil).Current)) } -// Terms mocks base method -func (m *MockMutableSegment) Terms(arg0 []byte) (TermsIterator, error) { +// Err mocks base method +func (m *MockFieldsIterator) Err() error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Terms", arg0) - ret0, _ := ret[0].(TermsIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "Err") + ret0, _ := ret[0].(error) + return ret0 } -// Terms indicates an expected call of Terms -func (mr *MockMutableSegmentMockRecorder) Terms(arg0 interface{}) *gomock.Call { +// Err indicates an expected call of Err +func (mr *MockFieldsIteratorMockRecorder) Err() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terms", reflect.TypeOf((*MockMutableSegment)(nil).Terms), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockFieldsIterator)(nil).Err)) } -// TermsIterable mocks base method -func (m *MockMutableSegment) TermsIterable() TermsIterable { +// Close mocks base method +func (m *MockFieldsIterator) Close() error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TermsIterable") - ret0, _ := ret[0].(TermsIterable) + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) return ret0 } -// TermsIterable indicates an expected call of TermsIterable -func (mr *MockMutableSegmentMockRecorder) TermsIterable() *gomock.Call { +// Close indicates an expected call of Close +func (mr *MockFieldsIteratorMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TermsIterable", reflect.TypeOf((*MockMutableSegment)(nil).TermsIterable)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockFieldsIterator)(nil).Close)) } -// MockBuilder is a mock of Builder interface -type MockBuilder struct { +// 
MockTermsIterator is a mock of TermsIterator interface +type MockTermsIterator struct { ctrl *gomock.Controller - recorder *MockBuilderMockRecorder + recorder *MockTermsIteratorMockRecorder } -// MockBuilderMockRecorder is the mock recorder for MockBuilder -type MockBuilderMockRecorder struct { - mock *MockBuilder +// MockTermsIteratorMockRecorder is the mock recorder for MockTermsIterator +type MockTermsIteratorMockRecorder struct { + mock *MockTermsIterator } -// NewMockBuilder creates a new mock instance -func NewMockBuilder(ctrl *gomock.Controller) *MockBuilder { - mock := &MockBuilder{ctrl: ctrl} - mock.recorder = &MockBuilderMockRecorder{mock} +// NewMockTermsIterator creates a new mock instance +func NewMockTermsIterator(ctrl *gomock.Controller) *MockTermsIterator { + mock := &MockTermsIterator{ctrl: ctrl} + mock.recorder = &MockTermsIteratorMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use -func (m *MockBuilder) EXPECT() *MockBuilderMockRecorder { +func (m *MockTermsIterator) EXPECT() *MockTermsIteratorMockRecorder { return m.recorder } -// AllDocs mocks base method -func (m *MockBuilder) AllDocs() (index.IDDocIterator, error) { +// Next mocks base method +func (m *MockTermsIterator) Next() bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AllDocs") - ret0, _ := ret[0].(index.IDDocIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} + ret := m.ctrl.Call(m, "Next") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Next indicates an expected call of Next +func (mr *MockTermsIteratorMockRecorder) Next() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockTermsIterator)(nil).Next)) +} + +// Current mocks base method +func (m *MockTermsIterator) Current() ([]byte, postings.List) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Current") + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(postings.List) + return ret0, ret1 +} + 
+// Current indicates an expected call of Current +func (mr *MockTermsIteratorMockRecorder) Current() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Current", reflect.TypeOf((*MockTermsIterator)(nil).Current)) +} + +// Err mocks base method +func (m *MockTermsIterator) Err() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Err") + ret0, _ := ret[0].(error) + return ret0 +} + +// Err indicates an expected call of Err +func (mr *MockTermsIteratorMockRecorder) Err() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockTermsIterator)(nil).Err)) +} + +// Close mocks base method +func (m *MockTermsIterator) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close +func (mr *MockTermsIteratorMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockTermsIterator)(nil).Close)) +} + +// MockMutableSegment is a mock of MutableSegment interface +type MockMutableSegment struct { + ctrl *gomock.Controller + recorder *MockMutableSegmentMockRecorder +} + +// MockMutableSegmentMockRecorder is the mock recorder for MockMutableSegment +type MockMutableSegmentMockRecorder struct { + mock *MockMutableSegment +} + +// NewMockMutableSegment creates a new mock instance +func NewMockMutableSegment(ctrl *gomock.Controller) *MockMutableSegment { + mock := &MockMutableSegment{ctrl: ctrl} + mock.recorder = &MockMutableSegmentMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockMutableSegment) EXPECT() *MockMutableSegmentMockRecorder { + return m.recorder +} + +// FieldsIterable mocks base method +func (m *MockMutableSegment) FieldsIterable() FieldsIterable { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, 
"FieldsIterable") + ret0, _ := ret[0].(FieldsIterable) + return ret0 +} + +// FieldsIterable indicates an expected call of FieldsIterable +func (mr *MockMutableSegmentMockRecorder) FieldsIterable() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FieldsIterable", reflect.TypeOf((*MockMutableSegment)(nil).FieldsIterable)) +} + +// TermsIterable mocks base method +func (m *MockMutableSegment) TermsIterable() TermsIterable { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TermsIterable") + ret0, _ := ret[0].(TermsIterable) + return ret0 +} + +// TermsIterable indicates an expected call of TermsIterable +func (mr *MockMutableSegmentMockRecorder) TermsIterable() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TermsIterable", reflect.TypeOf((*MockMutableSegment)(nil).TermsIterable)) +} + +// Size mocks base method +func (m *MockMutableSegment) Size() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Size") + ret0, _ := ret[0].(int64) + return ret0 +} + +// Size indicates an expected call of Size +func (mr *MockMutableSegmentMockRecorder) Size() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockMutableSegment)(nil).Size)) +} + +// ContainsID mocks base method +func (m *MockMutableSegment) ContainsID(docID []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainsID", docID) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ContainsID indicates an expected call of ContainsID +func (mr *MockMutableSegmentMockRecorder) ContainsID(docID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainsID", reflect.TypeOf((*MockMutableSegment)(nil).ContainsID), docID) +} + +// Reader mocks base method +func (m *MockMutableSegment) Reader() (index.Reader, error) { + m.ctrl.T.Helper() + ret := 
m.ctrl.Call(m, "Reader") + ret0, _ := ret[0].(index.Reader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Reader indicates an expected call of Reader +func (mr *MockMutableSegmentMockRecorder) Reader() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockMutableSegment)(nil).Reader)) +} + +// Close mocks base method +func (m *MockMutableSegment) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close +func (mr *MockMutableSegmentMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockMutableSegment)(nil).Close)) +} + +// Fields mocks base method +func (m *MockMutableSegment) Fields() (FieldsIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Fields") + ret0, _ := ret[0].(FieldsIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Fields indicates an expected call of Fields +func (mr *MockMutableSegmentMockRecorder) Fields() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockMutableSegment)(nil).Fields)) +} + +// Terms mocks base method +func (m *MockMutableSegment) Terms(field []byte) (TermsIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Terms", field) + ret0, _ := ret[0].(TermsIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Terms indicates an expected call of Terms +func (mr *MockMutableSegmentMockRecorder) Terms(field interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terms", reflect.TypeOf((*MockMutableSegment)(nil).Terms), field) +} + +// Reset mocks base method +func (m *MockMutableSegment) Reset(offset postings.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Reset", offset) +} + +// Reset 
indicates an expected call of Reset +func (mr *MockMutableSegmentMockRecorder) Reset(offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockMutableSegment)(nil).Reset), offset) +} + +// Docs mocks base method +func (m *MockMutableSegment) Docs() []doc.Document { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Docs") + ret0, _ := ret[0].([]doc.Document) + return ret0 +} + +// Docs indicates an expected call of Docs +func (mr *MockMutableSegmentMockRecorder) Docs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Docs", reflect.TypeOf((*MockMutableSegment)(nil).Docs)) +} + +// AllDocs mocks base method +func (m *MockMutableSegment) AllDocs() (index.IDDocIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllDocs") + ret0, _ := ret[0].(index.IDDocIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} // AllDocs indicates an expected call of AllDocs -func (mr *MockBuilderMockRecorder) AllDocs() *gomock.Call { +func (mr *MockMutableSegmentMockRecorder) AllDocs() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllDocs", reflect.TypeOf((*MockBuilder)(nil).AllDocs)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllDocs", reflect.TypeOf((*MockMutableSegment)(nil).AllDocs)) +} + +// Insert mocks base method +func (m *MockMutableSegment) Insert(d doc.Document) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Insert", d) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Insert indicates an expected call of Insert +func (mr *MockMutableSegmentMockRecorder) Insert(d interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockMutableSegment)(nil).Insert), d) +} + +// InsertBatch mocks base method +func (m *MockMutableSegment) InsertBatch(b 
index.Batch) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InsertBatch", b) + ret0, _ := ret[0].(error) + return ret0 +} + +// InsertBatch indicates an expected call of InsertBatch +func (mr *MockMutableSegmentMockRecorder) InsertBatch(b interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertBatch", reflect.TypeOf((*MockMutableSegment)(nil).InsertBatch), b) +} + +// Offset mocks base method +func (m *MockMutableSegment) Offset() postings.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Offset") + ret0, _ := ret[0].(postings.ID) + return ret0 +} + +// Offset indicates an expected call of Offset +func (mr *MockMutableSegmentMockRecorder) Offset() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Offset", reflect.TypeOf((*MockMutableSegment)(nil).Offset)) +} + +// Seal mocks base method +func (m *MockMutableSegment) Seal() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Seal") + ret0, _ := ret[0].(error) + return ret0 +} + +// Seal indicates an expected call of Seal +func (mr *MockMutableSegmentMockRecorder) Seal() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Seal", reflect.TypeOf((*MockMutableSegment)(nil).Seal)) +} + +// IsSealed mocks base method +func (m *MockMutableSegment) IsSealed() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsSealed") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsSealed indicates an expected call of IsSealed +func (mr *MockMutableSegmentMockRecorder) IsSealed() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSealed", reflect.TypeOf((*MockMutableSegment)(nil).IsSealed)) +} + +// MockBuilder is a mock of Builder interface +type MockBuilder struct { + ctrl *gomock.Controller + recorder *MockBuilderMockRecorder +} + +// MockBuilderMockRecorder is the mock recorder for MockBuilder +type MockBuilderMockRecorder 
struct { + mock *MockBuilder +} + +// NewMockBuilder creates a new mock instance +func NewMockBuilder(ctrl *gomock.Controller) *MockBuilder { + mock := &MockBuilder{ctrl: ctrl} + mock.recorder = &MockBuilderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockBuilder) EXPECT() *MockBuilderMockRecorder { + return m.recorder +} + +// Fields mocks base method +func (m *MockBuilder) Fields() (FieldsIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Fields") + ret0, _ := ret[0].(FieldsIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Fields indicates an expected call of Fields +func (mr *MockBuilderMockRecorder) Fields() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockBuilder)(nil).Fields)) +} + +// Terms mocks base method +func (m *MockBuilder) Terms(field []byte) (TermsIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Terms", field) + ret0, _ := ret[0].(TermsIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Terms indicates an expected call of Terms +func (mr *MockBuilderMockRecorder) Terms(field interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terms", reflect.TypeOf((*MockBuilder)(nil).Terms), field) +} + +// Reset mocks base method +func (m *MockBuilder) Reset(offset postings.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Reset", offset) +} + +// Reset indicates an expected call of Reset +func (mr *MockBuilderMockRecorder) Reset(offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockBuilder)(nil).Reset), offset) } // Docs mocks base method @@ -446,8 +787,46 @@ func (mr *MockBuilderMockRecorder) Docs() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Docs", 
reflect.TypeOf((*MockBuilder)(nil).Docs)) } +// AllDocs mocks base method +func (m *MockBuilder) AllDocs() (index.IDDocIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllDocs") + ret0, _ := ret[0].(index.IDDocIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AllDocs indicates an expected call of AllDocs +func (mr *MockBuilderMockRecorder) AllDocs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllDocs", reflect.TypeOf((*MockBuilder)(nil).AllDocs)) +} + +// MockDocumentsBuilder is a mock of DocumentsBuilder interface +type MockDocumentsBuilder struct { + ctrl *gomock.Controller + recorder *MockDocumentsBuilderMockRecorder +} + +// MockDocumentsBuilderMockRecorder is the mock recorder for MockDocumentsBuilder +type MockDocumentsBuilderMockRecorder struct { + mock *MockDocumentsBuilder +} + +// NewMockDocumentsBuilder creates a new mock instance +func NewMockDocumentsBuilder(ctrl *gomock.Controller) *MockDocumentsBuilder { + mock := &MockDocumentsBuilder{ctrl: ctrl} + mock.recorder = &MockDocumentsBuilderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockDocumentsBuilder) EXPECT() *MockDocumentsBuilderMockRecorder { + return m.recorder +} + // Fields mocks base method -func (m *MockBuilder) Fields() (FieldsIterator, error) { +func (m *MockDocumentsBuilder) Fields() (FieldsIterator, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Fields") ret0, _ := ret[0].(FieldsIterator) @@ -456,34 +835,200 @@ func (m *MockBuilder) Fields() (FieldsIterator, error) { } // Fields indicates an expected call of Fields -func (mr *MockBuilderMockRecorder) Fields() *gomock.Call { +func (mr *MockDocumentsBuilderMockRecorder) Fields() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockBuilder)(nil).Fields)) + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockDocumentsBuilder)(nil).Fields)) +} + +// Terms mocks base method +func (m *MockDocumentsBuilder) Terms(field []byte) (TermsIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Terms", field) + ret0, _ := ret[0].(TermsIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Terms indicates an expected call of Terms +func (mr *MockDocumentsBuilderMockRecorder) Terms(field interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terms", reflect.TypeOf((*MockDocumentsBuilder)(nil).Terms), field) } // Reset mocks base method -func (m *MockBuilder) Reset(arg0 postings.ID) { +func (m *MockDocumentsBuilder) Reset(offset postings.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Reset", arg0) + m.ctrl.Call(m, "Reset", offset) } // Reset indicates an expected call of Reset -func (mr *MockBuilderMockRecorder) Reset(arg0 interface{}) *gomock.Call { +func (mr *MockDocumentsBuilderMockRecorder) Reset(offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockDocumentsBuilder)(nil).Reset), offset) +} + +// Docs mocks base method +func (m *MockDocumentsBuilder) Docs() []doc.Document { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Docs") + ret0, _ := ret[0].([]doc.Document) + return ret0 +} + +// Docs indicates an expected call of Docs +func (mr *MockDocumentsBuilderMockRecorder) Docs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Docs", reflect.TypeOf((*MockDocumentsBuilder)(nil).Docs)) +} + +// AllDocs mocks base method +func (m *MockDocumentsBuilder) AllDocs() (index.IDDocIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllDocs") + ret0, _ := ret[0].(index.IDDocIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AllDocs indicates an expected call of AllDocs +func (mr 
*MockDocumentsBuilderMockRecorder) AllDocs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllDocs", reflect.TypeOf((*MockDocumentsBuilder)(nil).AllDocs)) +} + +// Insert mocks base method +func (m *MockDocumentsBuilder) Insert(d doc.Document) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Insert", d) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Insert indicates an expected call of Insert +func (mr *MockDocumentsBuilderMockRecorder) Insert(d interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockDocumentsBuilder)(nil).Insert), d) +} + +// InsertBatch mocks base method +func (m *MockDocumentsBuilder) InsertBatch(b index.Batch) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InsertBatch", b) + ret0, _ := ret[0].(error) + return ret0 +} + +// InsertBatch indicates an expected call of InsertBatch +func (mr *MockDocumentsBuilderMockRecorder) InsertBatch(b interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertBatch", reflect.TypeOf((*MockDocumentsBuilder)(nil).InsertBatch), b) +} + +// MockSegmentsBuilder is a mock of SegmentsBuilder interface +type MockSegmentsBuilder struct { + ctrl *gomock.Controller + recorder *MockSegmentsBuilderMockRecorder +} + +// MockSegmentsBuilderMockRecorder is the mock recorder for MockSegmentsBuilder +type MockSegmentsBuilderMockRecorder struct { + mock *MockSegmentsBuilder +} + +// NewMockSegmentsBuilder creates a new mock instance +func NewMockSegmentsBuilder(ctrl *gomock.Controller) *MockSegmentsBuilder { + mock := &MockSegmentsBuilder{ctrl: ctrl} + mock.recorder = &MockSegmentsBuilderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockSegmentsBuilder) EXPECT() *MockSegmentsBuilderMockRecorder { + return 
m.recorder +} + +// Fields mocks base method +func (m *MockSegmentsBuilder) Fields() (FieldsIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Fields") + ret0, _ := ret[0].(FieldsIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Fields indicates an expected call of Fields +func (mr *MockSegmentsBuilderMockRecorder) Fields() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockBuilder)(nil).Reset), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockSegmentsBuilder)(nil).Fields)) } // Terms mocks base method -func (m *MockBuilder) Terms(arg0 []byte) (TermsIterator, error) { +func (m *MockSegmentsBuilder) Terms(field []byte) (TermsIterator, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Terms", arg0) + ret := m.ctrl.Call(m, "Terms", field) ret0, _ := ret[0].(TermsIterator) ret1, _ := ret[1].(error) return ret0, ret1 } // Terms indicates an expected call of Terms -func (mr *MockBuilderMockRecorder) Terms(arg0 interface{}) *gomock.Call { +func (mr *MockSegmentsBuilderMockRecorder) Terms(field interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terms", reflect.TypeOf((*MockSegmentsBuilder)(nil).Terms), field) +} + +// Reset mocks base method +func (m *MockSegmentsBuilder) Reset(offset postings.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Reset", offset) +} + +// Reset indicates an expected call of Reset +func (mr *MockSegmentsBuilderMockRecorder) Reset(offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockSegmentsBuilder)(nil).Reset), offset) +} + +// Docs mocks base method +func (m *MockSegmentsBuilder) Docs() []doc.Document { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Docs") + ret0, _ := ret[0].([]doc.Document) + return ret0 +} + +// Docs indicates an expected call of Docs 
+func (mr *MockSegmentsBuilderMockRecorder) Docs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Docs", reflect.TypeOf((*MockSegmentsBuilder)(nil).Docs)) +} + +// AllDocs mocks base method +func (m *MockSegmentsBuilder) AllDocs() (index.IDDocIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllDocs") + ret0, _ := ret[0].(index.IDDocIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AllDocs indicates an expected call of AllDocs +func (mr *MockSegmentsBuilderMockRecorder) AllDocs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllDocs", reflect.TypeOf((*MockSegmentsBuilder)(nil).AllDocs)) +} + +// AddSegments mocks base method +func (m *MockSegmentsBuilder) AddSegments(segments []Segment) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddSegments", segments) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddSegments indicates an expected call of AddSegments +func (mr *MockSegmentsBuilderMockRecorder) AddSegments(segments interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terms", reflect.TypeOf((*MockBuilder)(nil).Terms), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSegments", reflect.TypeOf((*MockSegmentsBuilder)(nil).AddSegments), segments) } diff --git a/src/m3ninx/search/proptest/query_gen.go b/src/m3ninx/search/proptest/query_gen.go index b3b18a1e8c..f7e6820345 100644 --- a/src/m3ninx/search/proptest/query_gen.go +++ b/src/m3ninx/search/proptest/query_gen.go @@ -187,5 +187,3 @@ func GenQuery(docs []doc.Document) gopter.Gen { GenConjunctionQuery(docs), GenDisjunctionQuery(docs)) } - -// Ge