From 07595564afa6201d8858a06d47ea9c26023307de Mon Sep 17 00:00:00 2001
From: guo-shaoge
Date: Tue, 18 May 2021 14:59:05 +0800
Subject: [PATCH] Revert "add isReaderStale"

This reverts commit c45b41dae7eab88cbf4edbdb433e3ef4214bd3c6.
---
 util/chunk/disk.go      | 19 +++++--------------
 util/chunk/disk_test.go | 30 ------------------------------
 2 files changed, 5 insertions(+), 44 deletions(-)

diff --git a/util/chunk/disk.go b/util/chunk/disk.go
index f0d16dc037157..ef269213e9d0d 100644
--- a/util/chunk/disk.go
+++ b/util/chunk/disk.go
@@ -49,10 +49,6 @@ type ListInDisk struct {
 	checksumWriter *checksum.Writer
 	cipherWriter   *encrypt.Writer
 
-	// By using this flag, we don't need to allocate a new reader every time.
-	isReaderStale bool
-	r             io.ReaderAt
-
 	// ctrCipher stores the key and nonce using by aes encrypt io layer
 	ctrCipher *encrypt.CtrCipher
 }
@@ -64,8 +60,7 @@ func NewListInDisk(fieldTypes []*types.FieldType) *ListInDisk {
 	l := &ListInDisk{
 		fieldTypes: fieldTypes,
 		// TODO(fengliyuan): set the quota of disk usage.
-		diskTracker:   disk.NewTracker(memory.LabelForChunkListInDisk, -1),
-		isReaderStale: true,
+		diskTracker: disk.NewTracker(memory.LabelForChunkListInDisk, -1),
 	}
 	return l
 }
@@ -155,7 +150,6 @@ func (l *ListInDisk) Add(chk *Chunk) (err error) {
 	l.offsets = append(l.offsets, chk2.getOffsetsOfRows())
 	l.diskTracker.Consume(n)
 	l.numRowsInDisk += chk.NumRows()
-	l.isReaderStale = true
 	return
 }
 
@@ -180,14 +174,11 @@ func (l *ListInDisk) GetRow(ptr RowPtr) (row Row, err error) {
 	}
 	off := l.offsets[ptr.ChkIdx][ptr.RowIdx]
 	var underlying io.ReaderAt = l.disk
-	if l.isReaderStale {
-		if l.ctrCipher != nil {
-			underlying = NewReaderWithCache(encrypt.NewReader(l.disk, l.ctrCipher), l.cipherWriter.GetCache(), l.cipherWriter.GetCacheDataOffset())
-		}
-		l.r = NewReaderWithCache(checksum.NewReader(underlying), l.checksumWriter.GetCache(), l.checksumWriter.GetCacheDataOffset())
-		l.isReaderStale = false
+	if l.ctrCipher != nil {
+		underlying = NewReaderWithCache(encrypt.NewReader(l.disk, l.ctrCipher), l.cipherWriter.GetCache(), l.cipherWriter.GetCacheDataOffset())
 	}
-	r := io.NewSectionReader(l.r, off, l.offWrite-off)
+	checksumReader := NewReaderWithCache(checksum.NewReader(underlying), l.checksumWriter.GetCache(), l.checksumWriter.GetCacheDataOffset())
+	r := io.NewSectionReader(checksumReader, off, l.offWrite-off)
 	format := rowInDisk{numCol: len(l.fieldTypes)}
 	_, err = format.ReadFrom(r)
 	if err != nil {
diff --git a/util/chunk/disk_test.go b/util/chunk/disk_test.go
index c534f6c49b009..36750aa898244 100644
--- a/util/chunk/disk_test.go
+++ b/util/chunk/disk_test.go
@@ -263,7 +263,6 @@ func testReaderWithCache(c *check.C) {
 	chk := NewChunkWithCapacity(field, 1)
 	chk.AppendString(0, buf.String())
 	l := NewListInDisk(field)
-	c.Assert(l.isReaderStale, check.IsTrue)
 	err := l.Add(chk)
 	c.Assert(err, check.IsNil)
 
@@ -328,35 +327,6 @@ func testReaderWithCache(c *check.C) {
 	c.Assert(err, check.IsNil)
 	c.Assert(readCnt, check.Equals, 10)
 	c.Assert(reflect.DeepEqual(data, buf.Bytes()[1002:1012]), check.IsTrue)
-
-	// Test l.isReaderStale works properly
-	// It means only new reader is alloced after writing.
-	oriReader := l.r
-	for i := 0; i < 100; i++ {
-		row, err = l.GetRow(RowPtr{0, 0})
-		c.Assert(err, check.IsNil)
-		c.Assert(oriReader == l.r, check.IsTrue)
-		c.Assert(l.isReaderStale, check.IsFalse)
-	}
-	// After write, reader is stale.
-	err = l.Add(chk)
-	c.Assert(err, check.IsNil)
-	c.Assert(oriReader == l.r, check.IsTrue)
-	c.Assert(l.isReaderStale, check.IsTrue)
-
-	// New reader is generated when reading.
-	row, err = l.GetRow(RowPtr{1, 0})
-	c.Assert(err, check.IsNil)
-	c.Assert(oriReader != l.r, check.IsTrue)
-	c.Assert(l.isReaderStale, check.IsFalse)
-	oriReader = l.r
-
-	for i := 0; i < 100; i++ {
-		row, err = l.GetRow(RowPtr{0, 0})
-		c.Assert(err, check.IsNil)
-		c.Assert(oriReader == l.r, check.IsTrue)
-		c.Assert(l.isReaderStale, check.IsFalse)
-	}
 }
 
 // Here we test situations where size of data is small, so no data is flushed to disk.
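For readers unfamiliar with the pattern this patch removes, the sketch below is a minimal, hypothetical Go illustration of caching a derived io.ReaderAt behind a staleness flag (the pre-revert behavior in GetRow) versus rebuilding the reader on every read (the behavior restored here). It is not TiDB code: names such as diskList, getWithCache, and getFresh are invented, and bytes.Reader stands in for the checksum/encryption reader stack used in disk.go.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// diskList is a hypothetical stand-in for a structure like ListInDisk:
// it appends data to a backing buffer and reads it back via io.ReaderAt.
type diskList struct {
	buf bytes.Buffer

	// Cached reader plus staleness flag: the pattern the revert removes.
	r             io.ReaderAt
	isReaderStale bool
}

func (l *diskList) add(p []byte) {
	l.buf.Write(p)
	// Any write may invalidate the cached reader, so mark it stale.
	l.isReaderStale = true
}

// getWithCache rebuilds the reader only when it is stale (pre-revert behavior).
func (l *diskList) getWithCache(off, n int64) ([]byte, error) {
	if l.isReaderStale || l.r == nil {
		l.r = bytes.NewReader(l.buf.Bytes())
		l.isReaderStale = false
	}
	out := make([]byte, n)
	_, err := l.r.ReadAt(out, off)
	return out, err
}

// getFresh rebuilds the reader on every call (post-revert behavior).
func (l *diskList) getFresh(off, n int64) ([]byte, error) {
	r := bytes.NewReader(l.buf.Bytes())
	out := make([]byte, n)
	_, err := r.ReadAt(out, off)
	return out, err
}

func main() {
	l := &diskList{}
	l.add([]byte("hello world"))

	a, _ := l.getWithCache(0, 5)
	b, _ := l.getFresh(6, 5)
	fmt.Println(string(a), string(b)) // hello world
}

As the removed comment in disk.go notes, the cached reader only avoided allocating a new reader on each GetRow call; the revert trades that small allocation saving for simpler state, rebuilding checksumReader (and the optional encryption wrapper) on every read.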