From a5939f4d9874621f9cf365baefb38efc4cea4b8b Mon Sep 17 00:00:00 2001
From: sumeerbhola
Date: Fri, 8 Nov 2024 16:19:25 -0500
Subject: [PATCH] cache: misc tiny cleanups

- Removed entry.peekValue, since existing code in clockpro.go already
  accesses e.val directly, and the method indirection hinders
  readability.

- Updated the comment about the cgo pointer rules, since entry no
  longer points to shard.

- Removed the comment about shard size which suggested that all the
  blocks of a file are mapped to the same shard, since that is not
  true.

- Added an assertion that deleting a test page in Set does not yield a
  value to release (test pages hold no value).
---
 internal/cache/clockpro.go | 14 +++++++-------
 internal/cache/entry.go    | 19 +++++++++----------
 2 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/internal/cache/clockpro.go b/internal/cache/clockpro.go
index 1100baabd5..2459ec7b69 100644
--- a/internal/cache/clockpro.go
+++ b/internal/cache/clockpro.go
@@ -160,7 +160,7 @@ func (c *shard) Set(id ID, fileNum base.DiskFileNum, offset uint64, value *Value
 			e = nil
 		}
 
-	case e.peekValue() != nil:
+	case e.val != nil:
 		// cache entry was a hot or cold page
 		e.setValue(value)
 		e.referenced.Store(true)
@@ -179,7 +179,10 @@ func (c *shard) Set(id ID, fileNum base.DiskFileNum, offset uint64, value *Value
 		// cache entry was a test page
 		c.sizeTest -= e.size
 		c.countTest--
-		c.metaDel(e).release()
+		v := c.metaDel(e)
+		if invariants.Enabled && v != nil {
+			panic("value should be nil")
+		}
 		c.metaCheck(e)
 
 		e.size = int64(len(value.buf))
@@ -402,7 +405,7 @@ func (c *shard) metaAdd(key key, e *entry) bool {
 // the files map, and ensures that hand{Hot,Cold,Test} are not pointing at the
 // entry. Returns the deleted value that must be released, if any.
 func (c *shard) metaDel(e *entry) (deletedValue *Value) {
-	if value := e.peekValue(); value != nil {
+	if value := e.val; value != nil {
 		value.ref.trace("metaDel")
 	}
 	// Remove the pointer to the value.
@@ -708,10 +711,7 @@ func New(size int64) *Cache {
 	// We could consider growing the number of shards superlinearly, but
 	// increasing the shard count may reduce the effectiveness of the caching
 	// algorithm if frequently-accessed blocks are insufficiently distributed
-	// across shards. If a shard's size is smaller than a single frequently
-	// scanned sstable, then the shard will be unable to hold the entire
-	// frequently-scanned table in memory despite other shards still holding
-	// infrequently accessed blocks.
+	// across shards.
 	//
 	// Experimentally, we've observed contention contributing to tail latencies
 	// at 2 shards per processor. For now we use 4 shards per processor,
diff --git a/internal/cache/entry.go b/internal/cache/entry.go
index 81f011a4b8..5473a86704 100644
--- a/internal/cache/entry.go
+++ b/internal/cache/entry.go
@@ -29,18 +29,21 @@ func (p entryType) String() string {
 // entry holds the metadata for a cache entry. The memory for an entry is
 // allocated from manually managed memory.
 //
-// Using manual memory management for entries is technically a volation of the
-// Cgo pointer rules:
+// Using manual memory management for entries may seem to be a violation of
+// the Cgo pointer rules:
 //
 //	https://golang.org/cmd/cgo/#hdr-Passing_pointers
 //
 // Specifically, Go pointers should not be stored in C allocated memory. The
 // reason for this rule is that the Go GC will not look at C allocated memory
 // to find pointers to Go objects. If the only reference to a Go object is
-// stored in C allocated memory, the object will be reclaimed. The shard field
-// of the entry struct points to a Go allocated object, thus the
-// violation. What makes this "safe" is that the Cache guarantees that there
-// are other pointers to the shard which will keep it alive.
+// stored in C allocated memory, the object will be reclaimed. The entry
+// contains various pointers to other entries. This does not violate the Go
+// pointer rules because either all entries are manually allocated or none
+// are. Also, even if we had a mix of C and Go allocated memory, which would
+// violate the rule, we would not have this reclamation problem since the
+// lifetime of the entry is managed by the shard containing it, and not
+// reliant on the entry pointers.
 type entry struct {
 	key key
 	// The value associated with the entry. The entry holds a reference on the
@@ -135,10 +138,6 @@ func (e *entry) setValue(v *Value) {
 	old.release()
 }
 
-func (e *entry) peekValue() *Value {
-	return e.val
-}
-
 func (e *entry) acquireValue() *Value {
 	v := e.val
 	if v != nil {
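
A note for reviewers, not part of the patch: the new check in Set is
gated on invariants.Enabled, which (to my understanding) Pebble's
internal/invariants package exposes as a compile-time boolean flipped
by the "invariants" build tag, so the branch is dead code in production
builds. Below is a minimal, self-contained sketch of that pattern;
`enabled`, `Value`, and `metaDelStub` are hypothetical stand-ins rather
than Pebble's actual API.

	package main

	import "fmt"

	// enabled stands in for invariants.Enabled. Because it is a
	// compile-time constant, when it is false the assertion below is
	// eliminated entirely by the compiler.
	const enabled = true

	// Value is a trimmed-down stand-in for the cache's *Value.
	type Value struct {
		buf []byte
	}

	// metaDelStub models the contract asserted in Set: deleting a test
	// page must return a nil value, since test pages never hold one.
	func metaDelStub() *Value {
		return nil
	}

	func main() {
		v := metaDelStub()
		// The shape of the assertion from the patch: panic in
		// invariants builds if the contract is broken.
		if enabled && v != nil {
			panic("value should be nil")
		}
		fmt.Println("test page had no value, as asserted")
	}

Gating on a constant rather than a runtime flag is what lets the check
be free when assertions are disabled.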
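Similarly, the rewritten comment in entry.go notes that an entry
"contains various pointers to other entries". The sketch below shows
the shape of that intrusive linking using ordinary Go allocation; the
field names (next, prev) and methods are made up for illustration and
are not Pebble's actual layout. The comment's argument is that because
these pointers only ever target other entries, allocating all entries
from the same manual allocator (or none of them) never leaves a Go
pointer stranded in C-allocated memory.

	package main

	import "fmt"

	// entry is a toy cache entry that participates in an intrusive
	// circular list by pointing directly at its neighbors.
	type entry struct {
		key        int
		next, prev *entry
	}

	// link inserts e into the list immediately after a.
	func (a *entry) link(e *entry) {
		e.prev = a
		e.next = a.next
		e.next.prev = e
		a.next = e
	}

	// unlink removes e from its list, leaving it self-linked.
	func (e *entry) unlink() {
		e.prev.next = e.next
		e.next.prev = e.prev
		e.next, e.prev = e, e
	}

	func main() {
		head := &entry{key: 0}
		head.next, head.prev = head, head // one-element circular list
		for k := 1; k <= 3; k++ {
			head.link(&entry{key: k})
		}
		head.next.unlink() // drop the most recently inserted entry
		for e := head.next; e != head; e = e.next {
			fmt.Println("entry", e.key)
		}
	}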