diff --git a/src/cmd/services/m3dbnode/config/bootstrap.go b/src/cmd/services/m3dbnode/config/bootstrap.go
index 09d618b82c..fdc163116b 100644
--- a/src/cmd/services/m3dbnode/config/bootstrap.go
+++ b/src/cmd/services/m3dbnode/config/bootstrap.go
@@ -279,8 +279,8 @@ func (bsc BootstrapConfiguration) New(
 	adminClient client.AdminClient,
 ) (bootstrap.ProcessProvider, error) {
 	idxOpts := opts.IndexOptions()
-	compactor, err := compaction.NewCompactor(idxOpts.DocumentArrayPool(),
-		index.DocumentArrayPoolCapacity,
+	compactor, err := compaction.NewCompactor(idxOpts.MetadataArrayPool(),
+		index.MetadataArrayPoolCapacity,
 		idxOpts.SegmentBuilderOptions(),
 		idxOpts.FSTSegmentOptions(),
 		compaction.CompactorOptions{
diff --git a/src/dbnode/integration/integration.go b/src/dbnode/integration/integration.go
index 3c61c170d3..1aa2d56d13 100644
--- a/src/dbnode/integration/integration.go
+++ b/src/dbnode/integration/integration.go
@@ -430,8 +430,8 @@ func newCompactor(
 }
 
 func newCompactorWithErr(opts index.Options) (*compaction.Compactor, error) {
-	return compaction.NewCompactor(opts.DocumentArrayPool(),
-		index.DocumentArrayPoolCapacity,
+	return compaction.NewCompactor(opts.MetadataArrayPool(),
+		index.MetadataArrayPoolCapacity,
 		opts.SegmentBuilderOptions(),
 		opts.FSTSegmentOptions(),
 		compaction.CompactorOptions{
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go
index 8848be7ed3..79b0d58398 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go
@@ -401,12 +401,12 @@ func (s *fileSystemSource) loadShardReadersDataIntoShardResult(
 		seriesCachePolicy    = ropts.SeriesCachePolicy()
 		timesWithErrors      []time.Time
 		nsCtx                = namespace.NewContextFrom(ns)
-		docsPool             = s.opts.IndexOptions().DocumentArrayPool()
-		batch                = docsPool.Get()
+		metadataPool         = s.opts.IndexOptions().MetadataArrayPool()
+		batch                = metadataPool.Get()
 		totalEntries         int
 		totalFulfilledRanges = result.NewShardTimeRanges()
 	)
-	defer docsPool.Put(batch)
+	defer metadataPool.Put(batch)
 
 	requestedRanges := timeWindowReaders.Ranges
 	remainingRanges := requestedRanges.Copy()
@@ -740,7 +740,7 @@ func (s *fileSystemSource) readNextEntryAndMaybeIndex(
 
 	batch = append(batch, d)
 
-	if len(batch) >= index.DocumentArrayPoolCapacity {
+	if len(batch) >= index.MetadataArrayPoolCapacity {
 		return builder.FlushBatch(batch)
 	}
 
@@ -857,8 +857,8 @@ func (s *fileSystemSource) read(
 	builder := result.NewIndexBuilder(segBuilder)
 
 	indexOpts := s.opts.IndexOptions()
-	compactor, err := compaction.NewCompactor(indexOpts.DocumentArrayPool(),
-		index.DocumentArrayPoolCapacity,
+	compactor, err := compaction.NewCompactor(indexOpts.MetadataArrayPool(),
+		index.MetadataArrayPoolCapacity,
 		indexOpts.SegmentBuilderOptions(),
 		indexOpts.FSTSegmentOptions(),
 		compaction.CompactorOptions{
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go
index 6e2b509339..89f0cd6809 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go
@@ -79,8 +79,8 @@ var (
 
 func newTestOptions(t require.TestingT, filePathPrefix string) Options {
 	idxOpts := index.NewOptions()
-	compactor, err := compaction.NewCompactor(idxOpts.DocumentArrayPool(),
-		index.DocumentArrayPoolCapacity,
+	compactor, err := compaction.NewCompactor(idxOpts.MetadataArrayPool(),
+		index.MetadataArrayPoolCapacity,
 		idxOpts.SegmentBuilderOptions(),
 		idxOpts.FSTSegmentOptions(),
 		compaction.CompactorOptions{
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/peers_test.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/peers_test.go
index d30b675457..d223877d8f 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/peers_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/peers_test.go
@@ -46,8 +46,8 @@ func TestNewPeersBootstrapper(t *testing.T) {
 	defer ctrl.Finish()
 
 	idxOpts := index.NewOptions()
-	compactor, err := compaction.NewCompactor(idxOpts.DocumentArrayPool(),
-		index.DocumentArrayPoolCapacity,
+	compactor, err := compaction.NewCompactor(idxOpts.MetadataArrayPool(),
+		index.MetadataArrayPoolCapacity,
 		idxOpts.SegmentBuilderOptions(),
 		idxOpts.FSTSegmentOptions(),
 		compaction.CompactorOptions{
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go
index d25dd4b7d7..c8d409cfad 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go
@@ -750,8 +750,8 @@ func (s *peersSource) readIndex(
 	builder := result.NewIndexBuilder(segBuilder)
 
 	indexOpts := s.opts.IndexOptions()
-	compactor, err := compaction.NewCompactor(indexOpts.DocumentArrayPool(),
-		index.DocumentArrayPoolCapacity,
+	compactor, err := compaction.NewCompactor(indexOpts.MetadataArrayPool(),
+		index.MetadataArrayPoolCapacity,
 		indexOpts.SegmentBuilderOptions(),
 		indexOpts.FSTSegmentOptions(),
 		compaction.CompactorOptions{
@@ -831,13 +831,13 @@ func (s *peersSource) processReaders(
 	resultLock *sync.Mutex,
 ) (result.ShardTimeRanges, []time.Time) {
 	var (
-		docsPool        = s.opts.IndexOptions().DocumentArrayPool()
-		batch           = docsPool.Get()
+		metadataPool    = s.opts.IndexOptions().MetadataArrayPool()
+		batch           = metadataPool.Get()
 		timesWithErrors []time.Time
 		totalEntries    int
 	)
 	defer func() {
-		docsPool.Put(batch)
+		metadataPool.Put(batch)
 		// Return readers to pool.
 		for _, shardReaders := range timeWindowReaders.Readers {
 			for _, r := range shardReaders.Readers {
@@ -1031,7 +1031,7 @@ func (s *peersSource) readNextEntryAndMaybeIndex(
 
 	batch = append(batch, d)
 
-	if len(batch) >= index.DocumentArrayPoolCapacity {
+	if len(batch) >= index.MetadataArrayPoolCapacity {
 		return builder.FlushBatch(batch)
 	}
 
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go
index 25a6d565bc..7242a47fe7 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go
@@ -85,8 +85,8 @@ type namespaceOption func(namespace.Options) namespace.Options
 
 func newTestDefaultOpts(t *testing.T, ctrl *gomock.Controller) Options {
 	idxOpts := index.NewOptions()
-	compactor, err := compaction.NewCompactor(idxOpts.DocumentArrayPool(),
-		index.DocumentArrayPoolCapacity,
+	compactor, err := compaction.NewCompactor(idxOpts.MetadataArrayPool(),
+		index.MetadataArrayPoolCapacity,
 		idxOpts.SegmentBuilderOptions(),
 		idxOpts.FSTSegmentOptions(),
 		compaction.CompactorOptions{
diff --git a/src/dbnode/storage/index/compaction/compactor.go b/src/dbnode/storage/index/compaction/compactor.go
index 7ec097fcc0..b69d8908f6 100644
--- a/src/dbnode/storage/index/compaction/compactor.go
+++ b/src/dbnode/storage/index/compaction/compactor.go
@@ -47,7 +47,7 @@ type Compactor struct {
 	opts         CompactorOptions
 	writer       fst.Writer
-	docsPool     doc.DocumentArrayPool
+	metadataPool doc.MetadataArrayPool
 	docsMaxBatch int
 	fstOpts      fst.Options
 	builder      segment.SegmentsBuilder
@@ -71,7 +71,7 @@ type CompactorOptions struct {
 // NewCompactor returns a new compactor which reuses buffers
 // to avoid allocating intermediate buffers when compacting.
 func NewCompactor(
-	docsPool doc.DocumentArrayPool,
+	metadataPool doc.MetadataArrayPool,
 	docsMaxBatch int,
 	builderOpts builder.Options,
 	fstOpts fst.Options,
@@ -88,7 +88,7 @@ func NewCompactor(
 	return &Compactor{
 		opts:         opts,
 		writer:       writer,
-		docsPool:     docsPool,
+		metadataPool: metadataPool,
 		docsMaxBatch: docsMaxBatch,
 		builder:      builder.NewBuilderFromSegments(builderOpts),
 		fstOpts:      fstOpts,
@@ -147,9 +147,9 @@ func (c *Compactor) CompactUsingBuilder(
 	}
 
 	// Need to combine segments first
-	batch := c.docsPool.Get()
+	batch := c.metadataPool.Get()
 	defer func() {
-		c.docsPool.Put(batch)
+		c.metadataPool.Put(batch)
 	}()
 
 	// flushBatch is declared to reuse the same code from the
@@ -374,7 +374,7 @@ func (c *Compactor) Close() error {
 	c.closed = true
 
 	c.writer = nil
-	c.docsPool = nil
+	c.metadataPool = nil
 	c.fstOpts = nil
 	c.builder = nil
 	c.buff = nil
diff --git a/src/dbnode/storage/index/compaction/compactor_test.go b/src/dbnode/storage/index/compaction/compactor_test.go
index 631d8c892b..e0cb38249c 100644
--- a/src/dbnode/storage/index/compaction/compactor_test.go
+++ b/src/dbnode/storage/index/compaction/compactor_test.go
@@ -69,15 +69,15 @@ var (
 		},
 	}
 
-	testDocsMaxBatch = 8
-	testDocsPool     = doc.NewDocumentArrayPool(doc.DocumentArrayPoolOpts{
+	testMetadataMaxBatch = 8
+	testMetadataPool     = doc.NewMetadataArrayPool(doc.MetadataArrayPoolOpts{
 		Options:  pool.NewObjectPoolOptions().SetSize(1),
-		Capacity: testDocsMaxBatch,
+		Capacity: testMetadataMaxBatch,
 	})
 )
 
 func init() {
-	testDocsPool.Init()
+	testMetadataPool.Init()
 }
 
 func TestCompactorSingleMutableSegment(t *testing.T) {
@@ -90,7 +90,7 @@ func TestCompactorSingleMutableSegment(t *testing.T) {
 	_, err = seg.Insert(testDocuments[1])
 	require.NoError(t, err)
 
-	compactor, err := NewCompactor(testDocsPool, testDocsMaxBatch,
+	compactor, err := NewCompactor(testMetadataPool, testMetadataMaxBatch,
 		testBuilderSegmentOptions, testFSTSegmentOptions, CompactorOptions{})
 	require.NoError(t, err)
 
@@ -114,7 +114,7 @@ func TestCompactorSingleMutableSegmentWithMmapDocsData(t *testing.T) {
 	_, err = seg.Insert(testDocuments[1])
 	require.NoError(t, err)
 
-	compactor, err := NewCompactor(testDocsPool, testDocsMaxBatch,
+	compactor, err := NewCompactor(testMetadataPool, testMetadataMaxBatch,
 		testBuilderSegmentOptions, testFSTSegmentOptions, CompactorOptions{
 			MmapDocsData: true,
 		})
@@ -143,7 +143,7 @@ func TestCompactorManySegments(t *testing.T) {
 	_, err = seg2.Insert(testDocuments[1])
 	require.NoError(t, err)
 
-	compactor, err := NewCompactor(testDocsPool, testDocsMaxBatch,
+	compactor, err := NewCompactor(testMetadataPool, testMetadataMaxBatch,
 		testBuilderSegmentOptions, testFSTSegmentOptions, CompactorOptions{})
 	require.NoError(t, err)
 
@@ -174,7 +174,7 @@ func TestCompactorCompactDuplicateIDsNoError(t *testing.T) {
 	_, err = seg2.Insert(testDocuments[1])
 	require.NoError(t, err)
 
-	compactor, err := NewCompactor(testDocsPool, testDocsMaxBatch,
+	compactor, err := NewCompactor(testMetadataPool, testMetadataMaxBatch,
 		testBuilderSegmentOptions, testFSTSegmentOptions, CompactorOptions{})
 	require.NoError(t, err)
diff --git a/src/dbnode/storage/index/mutable_segments.go b/src/dbnode/storage/index/mutable_segments.go
index 256bb4a313..4722f8e77f 100644
--- a/src/dbnode/storage/index/mutable_segments.go
+++ b/src/dbnode/storage/index/mutable_segments.go
@@ -781,8 +781,8 @@ func (m *mutableSegmentsCompact) allocLazyBuilderAndCompactorsWithLock(
 	opts Options,
 ) error {
 	var (
-		err      error
-		docsPool = opts.DocumentArrayPool()
+		err          error
+		metadataPool = opts.MetadataArrayPool()
 	)
 	if m.segmentBuilder == nil {
 		builderOpts := opts.SegmentBuilderOptions().
@@ -795,8 +795,8 @@ func (m *mutableSegmentsCompact) allocLazyBuilderAndCompactorsWithLock(
 	}
 
 	if m.foregroundCompactor == nil {
-		m.foregroundCompactor, err = compaction.NewCompactor(docsPool,
-			DocumentArrayPoolCapacity,
+		m.foregroundCompactor, err = compaction.NewCompactor(metadataPool,
+			MetadataArrayPoolCapacity,
 			opts.SegmentBuilderOptions(),
 			opts.FSTSegmentOptions(),
 			compaction.CompactorOptions{
@@ -814,8 +814,8 @@ func (m *mutableSegmentsCompact) allocLazyBuilderAndCompactorsWithLock(
 	}
 
 	if m.backgroundCompactor == nil {
-		m.backgroundCompactor, err = compaction.NewCompactor(docsPool,
-			DocumentArrayPoolCapacity,
+		m.backgroundCompactor, err = compaction.NewCompactor(metadataPool,
+			MetadataArrayPoolCapacity,
 			opts.SegmentBuilderOptions(),
 			opts.FSTSegmentOptions(),
 			compaction.CompactorOptions{
diff --git a/src/dbnode/storage/index/options.go b/src/dbnode/storage/index/options.go
index 2c5ca34829..9e8a288624 100644
--- a/src/dbnode/storage/index/options.go
+++ b/src/dbnode/storage/index/options.go
@@ -40,18 +40,26 @@ const (
 	// defaultIndexInsertMode sets the default indexing mode to synchronous.
 	defaultIndexInsertMode = InsertSync
 
-	// documentArrayPool size in general: 256*256*sizeof(doc.Metadata)
-	// = 256 * 256 * 16
-	// = 1mb (but with Go's heap probably 2mb)
+	// metadataArrayPool size in general: 256*256*sizeof(doc.Metadata)
+	// = 256 * 256 * 48
+	// =~ 3mb
 	// TODO(r): Make this configurable in a followup change.
+	metadataArrayPoolSize = 256
+	// MetadataArrayPoolCapacity is the capacity of the metadata array pool.
+	MetadataArrayPoolCapacity    = 256
+	metadataArrayPoolMaxCapacity = 256 // Do not allow grows, since we know the size
+
+	// documentArrayPool size in general: 256*256*sizeof(doc.Document)
+	// = 256 * 256 * 80
+	// =~ 5mb
 	documentArrayPoolSize = 256
-	// DocumentArrayPoolCapacity is the capacity of the document array pool.
+	// DocumentArrayPoolCapacity is the capacity of the encoded document array pool.
 	DocumentArrayPoolCapacity    = 256
 	documentArrayPoolMaxCapacity = 256 // Do not allow grows, since we know the size
 
 	// aggregateResultsEntryArrayPool size in general: 256*256*sizeof(doc.Field)
-	// = 256 * 256 * 16
-	// = 1mb (but with Go's heap probably 2mb)
+	// = 256 * 256 * 48
+	// =~ 3mb
 	// TODO(prateek): Make this configurable in a followup change.
 	aggregateResultsEntryArrayPoolSize     = 256
 	aggregateResultsEntryArrayPoolCapacity = 256
@@ -65,6 +73,7 @@ var (
 	errOptionsAggResultsPoolUnspecified      = errors.New("aggregate results pool is unset")
 	errOptionsAggValuesPoolUnspecified       = errors.New("aggregate values pool is unset")
 	errOptionsDocPoolUnspecified             = errors.New("docs array pool is unset")
+	errOptionsDocContainerPoolUnspecified    = errors.New("doc container array pool is unset")
 	errOptionsAggResultsEntryPoolUnspecified = errors.New("aggregate results entry array pool is unset")
 	errIDGenerationDisabled                  = errors.New("id generation is disabled")
 	errPostingsListCacheUnspecified          = errors.New("postings list cache is unset")
@@ -118,6 +127,7 @@ type opts struct {
 	aggResultsPool                  AggregateResultsPool
 	aggValuesPool                   AggregateValuesPool
 	docArrayPool                    doc.DocumentArrayPool
+	metadataArrayPool               doc.MetadataArrayPool
 	aggResultsEntryArrayPool        AggregateResultsEntryArrayPool
 	foregroundCompactionPlannerOpts compaction.PlannerOptions
 	backgroundCompactionPlannerOpts compaction.PlannerOptions
@@ -150,6 +160,14 @@ func NewOptions() Options {
 	})
 	docArrayPool.Init()
 
+	metadataArrayPool := doc.NewMetadataArrayPool(doc.MetadataArrayPoolOpts{
+		Options: pool.NewObjectPoolOptions().
+			SetSize(metadataArrayPoolSize),
+		Capacity:    MetadataArrayPoolCapacity,
+		MaxCapacity: metadataArrayPoolMaxCapacity,
+	})
+	metadataArrayPool.Init()
+
 	aggResultsEntryArrayPool := NewAggregateResultsEntryArrayPool(AggregateResultsEntryArrayPoolOpts{
 		Options: pool.NewObjectPoolOptions().
 			SetSize(aggregateResultsEntryArrayPoolSize),
@@ -172,6 +190,7 @@ func NewOptions() Options {
 		aggResultsPool:                  aggResultsPool,
 		aggValuesPool:                   aggValuesPool,
 		docArrayPool:                    docArrayPool,
+		metadataArrayPool:               metadataArrayPool,
 		aggResultsEntryArrayPool:        aggResultsEntryArrayPool,
 		foregroundCompactionPlannerOpts: defaultForegroundCompactionOpts,
 		backgroundCompactionPlannerOpts: defaultBackgroundCompactionOpts,
@@ -206,6 +225,9 @@ func (o *opts) Validate() error {
 	if o.docArrayPool == nil {
 		return errOptionsDocPoolUnspecified
 	}
+	if o.metadataArrayPool == nil {
+		return errOptionsDocContainerPoolUnspecified
+	}
 	if o.aggResultsEntryArrayPool == nil {
 		return errOptionsAggResultsEntryPoolUnspecified
 	}
@@ -339,6 +361,16 @@ func (o *opts) DocumentArrayPool() doc.DocumentArrayPool {
 	return o.docArrayPool
 }
 
+func (o *opts) SetMetadataArrayPool(value doc.MetadataArrayPool) Options {
+	opts := *o
+	opts.metadataArrayPool = value
+	return &opts
+}
+
+func (o *opts) MetadataArrayPool() doc.MetadataArrayPool {
+	return o.metadataArrayPool
+}
+
 func (o *opts) SetAggregateResultsEntryArrayPool(value AggregateResultsEntryArrayPool) Options {
 	opts := *o
 	opts.aggResultsEntryArrayPool = value
diff --git a/src/dbnode/storage/index/types.go b/src/dbnode/storage/index/types.go
index a37b7a6849..12ceca4859 100644
--- a/src/dbnode/storage/index/types.go
+++ b/src/dbnode/storage/index/types.go
@@ -937,6 +937,12 @@ type Options interface {
 	// DocumentArrayPool returns the document array pool.
 	DocumentArrayPool() doc.DocumentArrayPool
 
+	// SetMetadataArrayPool sets the document container array pool.
+	SetMetadataArrayPool(value doc.MetadataArrayPool) Options
+
+	// MetadataArrayPool returns the document container array pool.
+	MetadataArrayPool() doc.MetadataArrayPool
+
 	// SetAggregateResultsEntryArrayPool sets the aggregate results entry array pool.
 	SetAggregateResultsEntryArrayPool(value AggregateResultsEntryArrayPool) Options
 
diff --git a/src/m3ninx/doc/doc_arraypool_gen.go b/src/m3ninx/doc/doc_arraypool_gen.go
index c52d4f4305..cc6dab1a25 100644
--- a/src/m3ninx/doc/doc_arraypool_gen.go
+++ b/src/m3ninx/doc/doc_arraypool_gen.go
@@ -48,20 +48,20 @@ import (
 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 // THE SOFTWARE.
 
-// DocumentArrayPool provides a pool for metadata slices.
+// DocumentArrayPool provides a pool for document slices.
 type DocumentArrayPool interface {
 	// Init initializes the array pool, it needs to be called
 	// before Get/Put use.
 	Init()
 
 	// Get returns the a slice from the pool.
-	Get() []Metadata
+	Get() []Document
 
 	// Put returns the provided slice to the pool.
-	Put(elems []Metadata)
+	Put(elems []Document)
 }
 
-type DocumentFinalizeFn func([]Metadata) []Metadata
+type DocumentFinalizeFn func([]Document) []Document
 
 type DocumentArrayPoolOpts struct {
 	Options     pool.ObjectPoolOptions
@@ -85,15 +85,15 @@ func NewDocumentArrayPool(opts DocumentArrayPoolOpts) DocumentArrayPool {
 
 func (p *DocumentArrPool) Init() {
 	p.pool.Init(func() interface{} {
-		return make([]Metadata, 0, p.opts.Capacity)
+		return make([]Document, 0, p.opts.Capacity)
 	})
 }
 
-func (p *DocumentArrPool) Get() []Metadata {
-	return p.pool.Get().([]Metadata)
+func (p *DocumentArrPool) Get() []Document {
+	return p.pool.Get().([]Document)
 }
 
-func (p *DocumentArrPool) Put(arr []Metadata) {
+func (p *DocumentArrPool) Put(arr []Document) {
 	arr = p.opts.FinalizeFn(arr)
 	if max := p.opts.MaxCapacity; max > 0 && cap(arr) > max {
 		return
@@ -101,8 +101,8 @@ func (p *DocumentArrPool) Put(arr []Metadata) {
 	p.pool.Put(arr)
 }
 
-func defaultDocumentFinalizerFn(elems []Metadata) []Metadata {
-	var empty Metadata
+func defaultDocumentFinalizerFn(elems []Document) []Document {
+	var empty Document
 	for i := range elems {
 		elems[i] = empty
 	}
@@ -110,16 +110,16 @@ func defaultDocumentFinalizerFn(elems []Metadata) []Metadata {
 	return elems
 }
 
-type DocumentArr []Metadata
+type DocumentArr []Document
 
-func (elems DocumentArr) grow(n int) []Metadata {
+func (elems DocumentArr) grow(n int) []Document {
 	if cap(elems) < n {
-		elems = make([]Metadata, n)
+		elems = make([]Document, n)
 	}
 	elems = elems[:n]
 	// following compiler optimized memcpy impl
 	// https://github.com/golang/go/wiki/CompilerOptimizations#optimized-memclr
-	var empty Metadata
+	var empty Document
 	for i := range elems {
 		elems[i] = empty
 	}
diff --git a/src/m3ninx/doc/metadata_arraypool_gen.go b/src/m3ninx/doc/metadata_arraypool_gen.go
new file mode 100644
index 0000000000..09aab94011
--- /dev/null
+++ b/src/m3ninx/doc/metadata_arraypool_gen.go
@@ -0,0 +1,127 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// This file was automatically generated by genny.
+// Any changes will be lost if this file is regenerated.
+// see https://github.com/mauricelam/genny
+
+package doc
+
+import (
+	"github.com/m3db/m3/src/x/pool"
+)
+
+// Copyright (c) 2018 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// MetadataArrayPool provides a pool for metadata slices.
+type MetadataArrayPool interface {
+	// Init initializes the array pool, it needs to be called
+	// before Get/Put use.
+	Init()
+
+	// Get returns the a slice from the pool.
+	Get() []Metadata
+
+	// Put returns the provided slice to the pool.
+	Put(elems []Metadata)
+}
+
+type MetadataFinalizeFn func([]Metadata) []Metadata
+
+type MetadataArrayPoolOpts struct {
+	Options     pool.ObjectPoolOptions
+	Capacity    int
+	MaxCapacity int
+	FinalizeFn  MetadataFinalizeFn
+}
+
+type MetadataArrPool struct {
+	opts MetadataArrayPoolOpts
+	pool pool.ObjectPool
+}
+
+func NewMetadataArrayPool(opts MetadataArrayPoolOpts) MetadataArrayPool {
+	if opts.FinalizeFn == nil {
+		opts.FinalizeFn = defaultMetadataFinalizerFn
+	}
+	p := pool.NewObjectPool(opts.Options)
+	return &MetadataArrPool{opts, p}
+}
+
+func (p *MetadataArrPool) Init() {
+	p.pool.Init(func() interface{} {
+		return make([]Metadata, 0, p.opts.Capacity)
+	})
+}
+
+func (p *MetadataArrPool) Get() []Metadata {
+	return p.pool.Get().([]Metadata)
+}
+
+func (p *MetadataArrPool) Put(arr []Metadata) {
+	arr = p.opts.FinalizeFn(arr)
+	if max := p.opts.MaxCapacity; max > 0 && cap(arr) > max {
+		return
+	}
+	p.pool.Put(arr)
+}
+
+func defaultMetadataFinalizerFn(elems []Metadata) []Metadata {
+	var empty Metadata
+	for i := range elems {
+		elems[i] = empty
+	}
+	elems = elems[:0]
+	return elems
+}
+
+type MetadataArr []Metadata
+
+func (elems MetadataArr) grow(n int) []Metadata {
+	if cap(elems) < n {
+		elems = make([]Metadata, n)
+	}
+	elems = elems[:n]
+	// following compiler optimized memcpy impl
+	// https://github.com/golang/go/wiki/CompilerOptimizations#optimized-memclr
+	var empty Metadata
+	for i := range elems {
+		elems[i] = empty
+	}
+	return elems
+}
diff --git a/src/m3ninx/generated-source-files.mk b/src/m3ninx/generated-source-files.mk
index 4b85df122b..bfd8364e91 100644
--- a/src/m3ninx/generated-source-files.mk
+++ b/src/m3ninx/generated-source-files.mk
@@ -98,9 +98,10 @@ genny-map-segment-mem-fieldsmap:
 
 # generation rule for all generated arraypools
 .PHONY: genny-arraypool-all
-genny-arraypool-all: \
-	genny-arraypool-bytes-slice-array-pool \
-	genny-arraypool-document-array-pool \
+genny-arraypool-all: \
+	genny-arraypool-bytes-slice-array-pool \
+	genny-arraypool-document-array-pool \
+	genny-arraypool-metadata-array-pool \
 
 # arraypool generation rule for ./x/bytes.SliceArrayPool
 .PHONY: genny-arraypool-bytes-slice-array-pool
@@ -117,13 +118,25 @@ genny-arraypool-bytes-slice-array-pool:
 # arraypool generation rule for ./doc.DocumentArrayPool
 .PHONY: genny-arraypool-document-array-pool
 genny-arraypool-document-array-pool:
+	cd $(m3x_package_path) && make genny-arraypool \
+		pkg=doc \
+		elem_type=Document \
+		target_package=$(m3ninx_package)/doc \
+		out_file=doc_arraypool_gen.go \
+		rename_type_prefix=Document \
+		rename_type_middle=Document \
+		rename_constructor=NewDocumentArrayPool \
+		rename_gen_types=true \
+
+# arraypool generation rule for ./doc.MetadataArrayPool
+.PHONY: genny-arraypool-metadata-array-pool
+genny-arraypool-metadata-array-pool:
 	cd $(m3x_package_path) && make genny-arraypool \
 		pkg=doc \
 		elem_type=Metadata \
 		target_package=$(m3ninx_package)/doc \
-		out_file=doc_arraypool_gen.go \
-		rename_type_prefix=Document \
-		rename_type_middle=Document \
-		rename_constructor=NewDocumentArrayPool \
+		out_file=metadata_arraypool_gen.go \
+		rename_type_prefix=Metadata \
+		rename_type_middle=Metadata \
+		rename_constructor=NewMetadataArrayPool \
 		rename_gen_types=true \
-
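For readers following the pooled-batch pattern this change threads through the bootstrap sources (get a slice from the pool, append until `index.MetadataArrayPoolCapacity`, flush, and finally return the slice to the pool), here is a minimal, self-contained sketch under assumptions: `flushBatch` is a hypothetical stand-in for `result.IndexBuilder.FlushBatch`, and the small `batchCapacity` is illustrative only (the diff uses 256).

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/m3ninx/doc"
	"github.com/m3db/m3/src/x/pool"
)

const batchCapacity = 8 // stand-in for index.MetadataArrayPoolCapacity (256 in the diff)

// flushBatch is a hypothetical stand-in for result.IndexBuilder.FlushBatch:
// it consumes the batch and hands back the emptied slice for reuse.
func flushBatch(batch []doc.Metadata) []doc.Metadata {
	fmt.Printf("flushed %d metadata entries\n", len(batch))
	return batch[:0]
}

func main() {
	metadataPool := doc.NewMetadataArrayPool(doc.MetadataArrayPoolOpts{
		Options:     pool.NewObjectPoolOptions().SetSize(1),
		Capacity:    batchCapacity,
		MaxCapacity: batchCapacity, // Put drops slices that grew beyond this
	})
	metadataPool.Init()

	batch := metadataPool.Get()
	// Closure form, as in peers/source.go: batch is reassigned on each
	// flush, so the closure returns the latest slice to the pool.
	defer func() { metadataPool.Put(batch) }()

	for i := 0; i < 20; i++ {
		batch = append(batch, doc.Metadata{ID: []byte(fmt.Sprintf("series-%d", i))})
		if len(batch) >= batchCapacity {
			batch = flushBatch(batch)
		}
	}
	if len(batch) > 0 {
		batch = flushBatch(batch) // flush the remainder
	}
}
```

Note that `Put` runs the finalizer first (zeroing entries and truncating to length zero), so pooled slices never pin stale `doc.Metadata` values between uses.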
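The TODOs in options.go note that pool sizing is not yet configurable via config, but the new `SetMetadataArrayPool` option added in this diff allows wiring a custom pool programmatically. A sketch under assumed sizing values; 32 and 512 below are illustrative, not recommendations:

```go
package main

import (
	"github.com/m3db/m3/src/dbnode/storage/index"
	"github.com/m3db/m3/src/m3ninx/doc"
	"github.com/m3db/m3/src/x/pool"
)

func main() {
	// Hypothetical sizing: 32 pooled slices of 512 entries each; the
	// defaults in this diff are 256 slices of 256 entries (~3mb).
	metadataPool := doc.NewMetadataArrayPool(doc.MetadataArrayPoolOpts{
		Options:     pool.NewObjectPoolOptions().SetSize(32),
		Capacity:    512,
		MaxCapacity: 512, // disallow growth, matching the defaults
	})
	metadataPool.Init()

	opts := index.NewOptions().SetMetadataArrayPool(metadataPool)
	if err := opts.Validate(); err != nil { // Validate now also checks this pool
		panic(err)
	}
	_ = opts.MetadataArrayPool() // the pool compactors and bootstrappers will draw from
}
```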