sstable: reduce block cache fragmentation
Previously, the sstable writer flushed blocks once their size reached a
configured threshold, which CRDB sets to 32KiB. However, when these
blocks are loaded into memory, additional cache metadata is allocated
alongside the block, pushing the allocation beyond that threshold.
Since CRDB uses jemalloc, such allocations are rounded up to the 40KiB
size class, which leads to internal fragmentation and higher memory
usage. This commit decrements the target block size by the size of the
cache metadata to reduce internal memory fragmentation.

Informs: cockroachdb#999.
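
A back-of-the-envelope sketch of the arithmetic behind the change, in Go. The 32KiB block size and the 40KiB jemalloc size class come from the message above; the concrete metadata size is a hypothetical stand-in for cache.ValueMetadataSize, which is platform- and build-dependent.

package main

import "fmt"

func main() {
	const blockSize = 32 << 10   // CRDB's configured sstable block size
	const valueMetadataSize = 48 // hypothetical stand-in for cache.ValueMetadataSize

	// Before: a full 32KiB block plus its cache metadata overshoots the
	// 32KiB jemalloc size class, so the allocation is rounded up to the
	// 40KiB class, wasting roughly 8KiB per cached block.
	fmt.Println(blockSize + valueMetadataSize) // 32816 -> 40KiB size class

	// After: the writer targets blockSize-valueMetadataSize, so block plus
	// metadata lands exactly on the 32KiB size class.
	fmt.Println((blockSize - valueMetadataSize) + valueMetadataSize) // 32768
}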
CheranMahalingam committed Apr 16, 2024
1 parent c34894c commit 458a389
Showing 10 changed files with 80 additions and 49 deletions.
9 changes: 5 additions & 4 deletions data_test.go
@@ -21,6 +21,7 @@ import (
"github.com/cockroachdb/errors"
"github.com/cockroachdb/pebble/bloom"
"github.com/cockroachdb/pebble/internal/base"
"github.com/cockroachdb/pebble/internal/cache"
"github.com/cockroachdb/pebble/internal/humanize"
"github.com/cockroachdb/pebble/internal/keyspan"
"github.com/cockroachdb/pebble/internal/private"
@@ -523,8 +524,8 @@ func runBuildRemoteCmd(td *datadriven.TestData, d *DB, storage remote.Storage) e
// Force two-level indexes if not already forced on or off.
blockSize = 5
}
writeOpts.BlockSize = int(blockSize)
writeOpts.IndexBlockSize = int(blockSize)
writeOpts.BlockSize = int(blockSize) + cache.ValueMetadataSize
writeOpts.IndexBlockSize = writeOpts.BlockSize

f, err := storage.CreateObject(path)
if err != nil {
@@ -1427,7 +1428,7 @@ func parseDBOptionsArgs(opts *Options, args []datadriven.CmdArg) error {
return err
}
for i := range opts.Levels {
opts.Levels[i].BlockSize = v
opts.Levels[i].BlockSize = v + cache.ValueMetadataSize
}
case "cache-size":
if opts.Cache != nil {
@@ -1445,7 +1446,7 @@ func parseDBOptionsArgs(opts *Options, args []datadriven.CmdArg) error {
return err
}
for i := range opts.Levels {
opts.Levels[i].IndexBlockSize = v
opts.Levels[i].IndexBlockSize = v + cache.ValueMetadataSize
}
case "target-file-size":
v, err := strconv.Atoi(cmdArg.Vals[0])
43 changes: 43 additions & 0 deletions internal/cache/value_cgo.go
@@ -0,0 +1,43 @@
// Copyright 2024 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

//go:build ((!invariants && !tracing) || race) && cgo
// +build !invariants,!tracing race
// +build cgo

package cache

import (
"unsafe"

"github.com/cockroachdb/pebble/internal/manual"
)

// ValueMetadataSize denotes the number of bytes of metadata allocated for a
// cache entry.
const ValueMetadataSize = int(unsafe.Sizeof(Value{}))

func newValue(n int) *Value {
if n == 0 {
return nil
}

// When we're not performing leak detection, the lifetime of the returned
// Value is exactly the lifetime of the backing buffer and we can manually
// allocate both.
b := manual.New(ValueMetadataSize + n)
v := (*Value)(unsafe.Pointer(&b[0]))
v.buf = b[ValueMetadataSize:]
v.ref.init(1)
return v
}

func (v *Value) free() {
// When we're not performing leak detection, the Value and buffer were
// allocated contiguously.
n := ValueMetadataSize + cap(v.buf)
buf := (*[manual.MaxArrayLen]byte)(unsafe.Pointer(v))[:n:n]
v.buf = nil
manual.Free(buf)
}
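
A standalone sketch of the layout idea in value_cgo.go, with a simplified entry type standing in for cache.Value and an ordinary Go slice standing in for manual.New; Pebble uses manually managed (cgo) memory here precisely because storing a header like this inside GC-managed bytes is not something production code should do.

package main

import (
	"fmt"
	"unsafe"
)

// entry is a simplified stand-in for cache.Value: a slice header plus a
// reference count, stored immediately before the data it describes.
type entry struct {
	buf []byte
	ref int32
}

const entryMetadataSize = int(unsafe.Sizeof(entry{}))

// newEntry carves the header and its n-byte buffer out of one backing
// allocation, mirroring newValue above. Because header and buffer share a
// single allocation, the allocator sees entryMetadataSize+n bytes, which is
// exactly the overhead the sstable writer now subtracts from its target
// block size.
func newEntry(n int) *entry {
	b := make([]byte, entryMetadataSize+n) // illustrative stand-in for manual.New
	e := (*entry)(unsafe.Pointer(&b[0]))
	e.buf = b[entryMetadataSize:]
	e.ref = 1
	return e
}

func main() {
	e := newEntry(32 << 10)
	fmt.Println(entryMetadataSize, len(e.buf)) // one allocation backs both
}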
4 changes: 4 additions & 0 deletions internal/cache/value_invariants.go
@@ -15,6 +15,10 @@ import (
"github.com/cockroachdb/pebble/internal/manual"
)

// ValueMetadataSize denotes the number of bytes of metadata allocated for a
// cache entry.
const ValueMetadataSize = 0

// newValue creates a Value with a manually managed buffer of size n.
//
// This definition of newValue is used when either the "invariants" or
45 changes: 8 additions & 37 deletions internal/cache/value_normal.go
@@ -2,56 +2,27 @@
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

//go:build (!invariants && !tracing) || race
//go:build ((!invariants && !tracing) || race) && !cgo
// +build !invariants,!tracing race
// +build !cgo

package cache

import (
"unsafe"

"github.com/cockroachdb/pebble/internal/manual"
)

const valueSize = int(unsafe.Sizeof(Value{}))
// ValueMetadataSize denotes the number of bytes of metadata allocated for a
// cache entry.
const ValueMetadataSize = 0

func newValue(n int) *Value {
if n == 0 {
return nil
}

if !cgoEnabled {
// If Cgo is disabled then all memory is allocated from the Go heap and we
// can't play the trick below to combine the Value and buffer allocation.
v := &Value{buf: make([]byte, n)}
v.ref.init(1)
return v
}

// When we're not performing leak detection, the lifetime of the returned
// Value is exactly the lifetime of the backing buffer and we can manually
// allocate both.
//
// TODO(peter): It may be better to separate the allocation of the value and
// the buffer in order to reduce internal fragmentation in malloc. If the
// buffer is right at a power of 2, adding valueSize might push the
// allocation over into the next larger size.
b := manual.New(valueSize + n)
v := (*Value)(unsafe.Pointer(&b[0]))
v.buf = b[valueSize:]
// Since Cgo is disabled, all memory is allocated from the Go heap and we
// can't play the trick used in value_cgo.go of combining the Value and
// buffer allocation.
v := &Value{buf: make([]byte, n)}
v.ref.init(1)
return v
}

func (v *Value) free() {
if !cgoEnabled {
return
}

// When we're not performing leak detection, the Value and buffer were
// allocated contiguously.
n := valueSize + cap(v.buf)
buf := (*[manual.MaxArrayLen]byte)(unsafe.Pointer(v))[:n:n]
v.buf = nil
manual.Free(buf)
}
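
For contrast, a minimal sketch of the non-cgo path above: the header and the buffer are two independent Go heap allocations, so the buffer allocation is exactly the block size, nothing pushes it into a larger size class, and ValueMetadataSize can stay 0. The value type below is a simplified stand-in for cache.Value.

package main

// value is a simplified stand-in for cache.Value in the non-cgo build.
type value struct {
	buf []byte
	ref int32
}

// newValueNoCgo allocates the header and the buffer separately, so the
// buffer allocation is exactly n bytes and no block size adjustment is
// needed to avoid internal fragmentation from cache metadata.
func newValueNoCgo(n int) *value {
	if n == 0 {
		return nil
	}
	v := &value{buf: make([]byte, n)}
	v.ref = 1
	return v
}

func main() {
	_ = newValueNoCgo(32 << 10)
}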
6 changes: 5 additions & 1 deletion iterator_test.go
@@ -22,6 +22,7 @@ import (
"github.com/cockroachdb/errors"
"github.com/cockroachdb/pebble/internal/base"
"github.com/cockroachdb/pebble/internal/bytealloc"
"github.com/cockroachdb/pebble/internal/cache"
"github.com/cockroachdb/pebble/internal/invalidating"
"github.com/cockroachdb/pebble/internal/manifest"
"github.com/cockroachdb/pebble/internal/testkeys"
@@ -1190,7 +1191,10 @@ func TestIteratorBlockIntervalFilter(t *testing.T) {
FormatMajorVersion: internalFormatNewest,
BlockPropertyCollectors: bpCollectors,
}
lo := LevelOptions{BlockSize: 1, IndexBlockSize: 1}
lo := LevelOptions{
BlockSize: 1 + cache.ValueMetadataSize,
IndexBlockSize: 1 + cache.ValueMetadataSize,
}
opts.Levels = append(opts.Levels, lo)

// Automatic compactions may compact away tombstones from L6, making
2 changes: 2 additions & 0 deletions sstable/data_test.go
@@ -41,6 +41,7 @@ func optsFromArgs(td *datadriven.TestData, writerOpts *WriterOptions) error {
if err != nil {
return err
}
writerOpts.BlockSize += cache.ValueMetadataSize
case "index-block-size":
if len(arg.Vals) != 1 {
return errors.Errorf("%s: arg %s expects 1 value", td.Cmd, arg.Key)
@@ -50,6 +51,7 @@
if err != nil {
return err
}
writerOpts.IndexBlockSize += cache.ValueMetadataSize
case "filter":
writerOpts.FilterPolicy = bloom.FilterPolicy(10)
case "comparer-split-4b-suffix":
9 changes: 7 additions & 2 deletions sstable/options.go
@@ -236,9 +236,12 @@ func (o WriterOptions) ensureDefaults() WriterOptions {
if o.BlockRestartInterval <= 0 {
o.BlockRestartInterval = base.DefaultBlockRestartInterval
}
if o.BlockSize <= 0 {
// The target block size is decremented to reduce internal fragmentation when
// blocks are loaded into the block cache.
if o.BlockSize <= cache.ValueMetadataSize {
o.BlockSize = base.DefaultBlockSize
}
o.BlockSize -= cache.ValueMetadataSize
if o.BlockSizeThreshold <= 0 {
o.BlockSizeThreshold = base.DefaultBlockSizeThreshold
}
@@ -248,8 +251,10 @@
if o.Compression <= DefaultCompression || o.Compression >= NCompression {
o.Compression = SnappyCompression
}
if o.IndexBlockSize <= 0 {
if o.IndexBlockSize <= cache.ValueMetadataSize {
o.IndexBlockSize = o.BlockSize
} else {
o.IndexBlockSize -= cache.ValueMetadataSize
}
if o.MergerName == "" {
o.MergerName = base.DefaultMerger.Name
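
A sketch of the defaulting logic above pulled out as a standalone function, with hypothetical constants standing in for base.DefaultBlockSize and cache.ValueMetadataSize. Note that when IndexBlockSize falls back to BlockSize it skips the subtraction, because BlockSize has already been adjusted.

package main

import "fmt"

const defaultBlockSize = 4096 // hypothetical stand-in for base.DefaultBlockSize
const valueMetadataSize = 48  // hypothetical stand-in for cache.ValueMetadataSize

// effectiveSizes mirrors the ensureDefaults adjustments shown above.
func effectiveSizes(blockSize, indexBlockSize int) (int, int) {
	if blockSize <= valueMetadataSize { // unset, or too small to adjust
		blockSize = defaultBlockSize
	}
	blockSize -= valueMetadataSize
	if indexBlockSize <= valueMetadataSize {
		indexBlockSize = blockSize // already adjusted; no second subtraction
	} else {
		indexBlockSize -= valueMetadataSize
	}
	return blockSize, indexBlockSize
}

func main() {
	fmt.Println(effectiveSizes(32<<10, 0))      // 32720 32720
	fmt.Println(effectiveSizes(32<<10, 64<<10)) // 32720 65488
}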
4 changes: 2 additions & 2 deletions sstable/reader_test.go
@@ -1406,8 +1406,8 @@ func TestReaderChecksumErrors(t *testing.T) {
}

w := NewWriter(objstorageprovider.NewFileWritable(f), WriterOptions{
BlockSize: blockSize,
IndexBlockSize: indexBlockSize,
BlockSize: blockSize + cache.ValueMetadataSize,
IndexBlockSize: indexBlockSize + cache.ValueMetadataSize,
Checksum: checksumType,
})
require.NoError(t, w.Set(bytes.Repeat([]byte("a"), blockSize), nil))
5 changes: 3 additions & 2 deletions sstable/test_fixtures.go
@@ -17,6 +17,7 @@ import (

"github.com/cockroachdb/pebble/bloom"
"github.com/cockroachdb/pebble/internal/base"
"github.com/cockroachdb/pebble/internal/cache"
"github.com/cockroachdb/pebble/objstorage/objstorageprovider"
"github.com/cockroachdb/pebble/vfs"
)
@@ -266,8 +267,8 @@ func (tf TestFixtureInfo) Build(fs vfs.FS, filename string) error {
}

const fixtureDefaultIndexBlockSize = math.MaxInt32
const fixtureSmallIndexBlockSize = 128
const fixtureBlockSize = 2048
const fixtureSmallIndexBlockSize = 128 + cache.ValueMetadataSize
const fixtureBlockSize = 2048 + cache.ValueMetadataSize
const fixtureFormat = TableFormatPebblev1

var fixtureComparer = func() *Comparer {
2 changes: 1 addition & 1 deletion sstable/writer_test.go
@@ -767,7 +767,7 @@ func TestWriterBlockPropertiesErrors(t *testing.T) {
require.NoError(t, err)

w := NewWriter(objstorageprovider.NewFileWritable(f), WriterOptions{
BlockSize: 1,
BlockSize: 1 + cache.ValueMetadataSize,
BlockPropertyCollectors: []func() BlockPropertyCollector{
func() BlockPropertyCollector {
return &testBlockPropCollector{