util: optimize the memory usage of the read path for listInDisk #34778

Merged Jun 6, 2022 · 7 commits · Changes from 6 commits
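Summary of the change, as read from the diff below: this PR pools the scratch Chunk used on ListInDisk's read path. Previously, every GetRow call deserialized its row through toMutRow, constructing a fresh one-row Chunk and per-column buffers each time; GetRow now takes a reusable Chunk from a sync.Pool, appends the decoded row to it via the new toRow, and returns the Chunk to the pool, so repeated reads stop allocating. The tests gain a concurrency parameter to exercise GetRow from multiple goroutines.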
48 changes: 25 additions & 23 deletions util/chunk/disk.go
@@ -18,6 +18,7 @@ import (
 	"io"
 	"os"
 	"strconv"
+	"sync"

 	errors2 "github.com/pingcap/errors"
 	"github.com/pingcap/tidb/config"
@@ -39,6 +40,8 @@ type ListInDisk struct {

 	dataFile   diskFileReaderWriter
 	offsetFile diskFileReaderWriter
+
+	chkPool *sync.Pool // Using a Chunk Pool to avoid constructing a chunk structure for each GetRow()
 }

 // diskFileReaderWriter represents a Reader and a Writer for the temporary disk file.
@@ -105,6 +108,9 @@ func NewListInDisk(fieldTypes []*types.FieldType) *ListInDisk {
 		fieldTypes: fieldTypes,
 		// TODO(fengliyuan): set the quota of disk usage.
 		diskTracker: disk.NewTracker(memory.LabelForChunkListInDisk, -1),
+		chkPool: &sync.Pool{New: func() interface{} {
+			return NewChunkWithCapacity(fieldTypes, 1024)
+		}},
 	}
 	return l
 }
@@ -194,7 +200,9 @@ func (l *ListInDisk) GetRow(ptr RowPtr) (row Row, err error) {
 	if err != nil {
 		return row, err
 	}
-	row = format.toMutRow(l.fieldTypes).ToRow()
+	chk := l.chkPool.Get().(*Chunk)
+	row, chk = format.toRow(l.fieldTypes, chk)
+	l.chkPool.Put(chk)
 	return row, err
 }
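The new read path follows a get-from-pool, decode-into-the-pooled-object, put-it-back shape. Below is a minimal, self-contained sketch of that pattern; decodeBuf and decodeRow are hypothetical stand-ins for Chunk and GetRow, and, unlike the PR (whose returned Row intentionally references the pooled Chunk), this sketch copies the result out before returning the buffer to the pool.

package main

import (
	"fmt"
	"sync"
)

// decodeBuf is a hypothetical stand-in for chunk.Chunk: a reusable
// structure that is comparatively expensive to allocate on every call.
type decodeBuf struct {
	cells []string
}

var bufPool = sync.Pool{
	New: func() interface{} { return &decodeBuf{cells: make([]string, 0, 1024)} },
}

// decodeRow mirrors the shape of the new GetRow: take a pooled buffer,
// deserialize into it, then put the buffer back for the next caller.
func decodeRow(raw string) []string {
	buf := bufPool.Get().(*decodeBuf)
	buf.cells = append(buf.cells[:0], raw, raw+"-decoded") // stand-in for deserialization
	out := make([]string, len(buf.cells))                  // copy out so nothing aliases pooled memory
	copy(out, buf.cells)
	bufPool.Put(buf)
	return out
}

func main() {
	fmt.Println(decodeRow("r1")) // [r1 r1-decoded]
	fmt.Println(decodeRow("r2")) // [r2 r2-decoded]
}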

@@ -233,6 +241,7 @@ func (l *ListInDisk) Close() error {
 		terror.Call(l.offsetFile.disk.Close)
 		terror.Log(os.Remove(l.offsetFile.disk.Name()))
 	}
+	l.chkPool = nil
 	return nil
 }

@@ -348,7 +357,7 @@ type diskFormatRow struct {
 	sizesOfColumns []int64 // -1 means null
 	// cells represents raw data of not-null columns in one row.
 	// In convertFromRow, data from Row is shallow copied to cells.
-	// In toMutRow, data in cells is shallow copied to MutRow.
+	// In toRow, data in cells is deep copied to Row.
 	cells [][]byte
 }
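The comment change above captures the new contract: convertFromRow may alias the source row's memory, while toRow must copy, since the destination Chunk is reused and outlives any single diskFormatRow. A tiny illustration of the shallow-versus-deep distinction in generic Go (not TiDB code):

package main

import "fmt"

func main() {
	src := []byte("abc")

	shallow := src                      // shares src's backing array (convertFromRow-style)
	deep := append([]byte(nil), src...) // owns its own bytes (toRow-style)

	src[0] = 'X'
	fmt.Println(string(shallow)) // "Xbc": sees the mutation
	fmt.Println(string(deep))    // "abc": unaffected
}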

@@ -378,35 +387,28 @@ func convertFromRow(row Row, reuse *diskFormatRow) (format *diskFormatRow) {
 	return
 }

-// toMutRow deserializes diskFormatRow to MutRow.
-func (format *diskFormatRow) toMutRow(fields []*types.FieldType) MutRow {
-	chk := &Chunk{columns: make([]*Column, 0, len(format.sizesOfColumns))}
+// toRow deserializes diskFormatRow to Row.
+func (format *diskFormatRow) toRow(fields []*types.FieldType, chk *Chunk) (Row, *Chunk) {
+	if chk == nil || chk.IsFull() {
[Review comment, Collaborator, on the line above]
Just a question: maybe the chk == nil check is unnecessary? The sync.Pool will always create a new chunk.

[Reply, Contributor and PR author]
Checking chk == nil is safer. For example, the code in the test cases doesn't have the sync.Pool and doesn't need to reuse chunks. (A small demo of this point follows after this hunk.)

+		chk = NewChunkWithCapacity(fields, 1024)
+	}
 	var cellOff int
 	for colIdx, size := range format.sizesOfColumns {
-		col := &Column{length: 1}
-		elemSize := getFixedLen(fields[colIdx])
+		col := chk.columns[colIdx]
 		if size == -1 { // isNull
-			col.nullBitmap = []byte{0}
-			if elemSize == varElemLen {
-				col.offsets = []int64{0, 0}
-			} else {
-				buf := make([]byte, elemSize)
-				col.data = buf
-				col.elemBuf = buf
-			}
+			col.AppendNull()
 		} else {
-			col.nullBitmap = []byte{1}
-			col.data = format.cells[cellOff]
-			cellOff++
-			if elemSize == varElemLen {
-				col.offsets = []int64{0, int64(len(col.data))}
+			if col.isFixed() {
+				col.elemBuf = format.cells[cellOff]
+				col.finishAppendFixed()
 			} else {
-				col.elemBuf = col.data
+				col.AppendBytes(format.cells[cellOff])
 			}
+			cellOff++
 		}
-		chk.columns = append(chk.columns, col)
 	}
-	return MutRow{c: chk}
+
+	return Row{c: chk, idx: chk.NumRows() - 1}, chk
 }

 // ReaderWithCache helps to read data that has not be flushed to underlying layer.
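For the review thread above, a minimal demonstration of both points: Get never returns nil while New is set, yet toRow's nil check still matters for callers that bypass the pool entirely, as the updated test below does by passing nil directly.

package main

import (
	"fmt"
	"sync"
)

func main() {
	// With New set, an empty pool falls back to New, so Get never returns nil.
	withNew := sync.Pool{New: func() interface{} { return "fresh" }}
	fmt.Println(withNew.Get()) // "fresh"

	// Without New, Get on an empty pool returns nil, and a callee like
	// toRow can also be handed nil directly, as the test file does.
	var bare sync.Pool
	fmt.Println(bare.Get() == nil) // true
}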
46 changes: 38 additions & 8 deletions util/chunk/disk_test.go
@@ -23,6 +23,7 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
+	"sync"
 	"testing"

 	errors2 "github.com/pingcap/errors"
@@ -189,7 +190,7 @@ func (l *listInDiskWriteDisk) GetRow(ptr RowPtr) (row Row, err error) {
 	if err != nil {
 		return row, err
 	}
-	row = format.toMutRow(l.fieldTypes).ToRow()
+	row, _ = format.toRow(l.fieldTypes, nil)
 	return row, err
 }

@@ -211,7 +212,7 @@ func checkRow(t *testing.T, row1, row2 Row) {
 	}
 }

-func testListInDisk(t *testing.T) {
+func testListInDisk(t *testing.T, concurrency int) {
 	numChk, numRow := 10, 1000
 	chks, fields := initChunks(numChk, numRow)
 	lChecksum := NewListInDisk(fields)
@@ -236,21 +237,42 @@ func testListInDisk(t *testing.T) {
 		}
 	}

+	expectRows := make([]Row, 0, len(ptrs))
 	for _, rowPtr := range ptrs {
-		row1, err := lChecksum.GetRow(rowPtr)
+		row, err := lDisk.GetRow(rowPtr)
 		require.NoError(t, err)
-		row2, err := lDisk.GetRow(rowPtr)
-		require.NoError(t, err)
-		checkRow(t, row1, row2)
+		expectRows = append(expectRows, row)
 	}
+
+	wg := sync.WaitGroup{}
+	wg.Add(concurrency)
+	for con := 0; con < concurrency; con++ {
+		go func() {
+			for i, rowPtr := range ptrs {
+				row, err := lChecksum.GetRow(rowPtr)
+				require.NoError(t, err)
+				checkRow(t, row, expectRows[i])
+			}
+			wg.Done()
+		}()
+	}
+	wg.Wait()
 }

 func TestListInDiskWithChecksum(t *testing.T) {
 	defer config.RestoreFunc()()
 	config.UpdateGlobal(func(conf *config.Config) {
 		conf.Security.SpilledFileEncryptionMethod = config.SpilledFileEncryptionMethodPlaintext
 	})
-	t.Run("testListInDisk", testListInDisk)
+	t.Run("testListInDisk", func(t *testing.T) {
+		testListInDisk(t, 1)
+	})
+	t.Run("testListInDisk", func(t *testing.T) {
+		testListInDisk(t, 2)
+	})
+	t.Run("testListInDisk", func(t *testing.T) {
+		testListInDisk(t, 8)
+	})

 	t.Run("testReaderWithCache", testReaderWithCache)
 	t.Run("testReaderWithCacheNoFlush", testReaderWithCacheNoFlush)
@@ -261,7 +283,15 @@ func TestListInDiskWithChecksumAndEncrypt(t *testing.T) {
 	defer config.RestoreFunc()()
 	config.UpdateGlobal(func(conf *config.Config) {
 		conf.Security.SpilledFileEncryptionMethod = config.SpilledFileEncryptionMethodAES128CTR
 	})
-	t.Run("testListInDisk", testListInDisk)
+	t.Run("testListInDisk", func(t *testing.T) {
+		testListInDisk(t, 1)
+	})
+	t.Run("testListInDisk", func(t *testing.T) {
+		testListInDisk(t, 2)
+	})
+	t.Run("testListInDisk", func(t *testing.T) {
+		testListInDisk(t, 8)
+	})

 	t.Run("testReaderWithCache", testReaderWithCache)
 	t.Run("testReaderWithCacheNoFlush", testReaderWithCacheNoFlush)
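Not part of the PR, but a sketch of how one could quantify the memory win the title claims: a pair of Go benchmarks contrasting pooled and per-call allocation. The buf type is hypothetical; run with `go test -bench . -benchmem` and compare allocs/op.

package chunkpool_test

import (
	"sync"
	"testing"
)

// buf is a hypothetical stand-in for the per-read scratch structure.
type buf struct{ data []byte }

var pool = sync.Pool{New: func() interface{} { return &buf{data: make([]byte, 4096)} }}

// BenchmarkPooledRead reuses one buffer per iteration via sync.Pool,
// mirroring the new GetRow; expect near-zero allocs/op in steady state.
func BenchmarkPooledRead(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		bf := pool.Get().(*buf)
		bf.data[0] = byte(i) // simulate decoding into the buffer
		pool.Put(bf)
	}
}

// BenchmarkFreshRead allocates a new buffer per iteration, mirroring the
// old toMutRow path; expect at least one allocation per op.
func BenchmarkFreshRead(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		bf := &buf{data: make([]byte, 4096)}
		bf.data[0] = byte(i)
	}
}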