diff --git a/fastcache.go b/fastcache.go
index 342b67d..daa3db7 100644
--- a/fastcache.go
+++ b/fastcache.go
@@ -271,20 +271,18 @@ func (b *bucket) Reset() {
 	b.mu.Unlock()
 }
 
-func (b *bucket) Clean() {
-	b.mu.Lock()
+func (b *bucket) cleanLocked() {
 	bGen := b.gen & ((1 << genSizeBits) - 1)
 	bIdx := b.idx
 	bm := b.m
 	for k, v := range bm {
 		gen := v >> bucketSizeBits
 		idx := v & ((1 << bucketSizeBits) - 1)
-		if gen == bGen && idx < bIdx || gen+1 == bGen && idx >= bIdx || gen == maxGen && bGen == 1 && idx >= bIdx {
+		if (gen+1 == bGen || gen == maxGen && bGen == 1) && idx >= bIdx || gen == bGen && idx < bIdx {
 			continue
 		}
 		delete(bm, k)
 	}
-	b.mu.Unlock()
 }
 
 func (b *bucket) UpdateStats(s *Stats) {
@@ -296,19 +294,17 @@ func (b *bucket) UpdateStats(s *Stats) {
 
 	b.mu.RLock()
 	s.EntriesCount += uint64(len(b.m))
+	bytesSize := uint64(0)
 	for _, chunk := range b.chunks {
-		s.BytesSize += uint64(cap(chunk))
+		bytesSize += uint64(cap(chunk))
 	}
-	s.MaxBytesSize += uint64(len(b.chunks))*chunkSize
+	s.BytesSize += bytesSize
+	s.MaxBytesSize += uint64(len(b.chunks)) * chunkSize
 	b.mu.RUnlock()
 }
 
 func (b *bucket) Set(k, v []byte, h uint64) {
-	setCalls := atomic.AddUint64(&b.setCalls, 1)
-	if setCalls%(1<<14) == 0 {
-		b.Clean()
-	}
-
+	atomic.AddUint64(&b.setCalls, 1)
 	if len(k) >= (1<<16) || len(v) >= (1<<16) {
 		// Too big key or value - its length cannot be encoded
 		// with 2 bytes (see below). Skip the entry.
@@ -326,13 +322,15 @@ func (b *bucket) Set(k, v []byte, h uint64) {
 		return
 	}
 
+	chunks := b.chunks
+	needClean := false
 	b.mu.Lock()
 	idx := b.idx
 	idxNew := idx + kvLen
 	chunkIdx := idx / chunkSize
 	chunkIdxNew := idxNew / chunkSize
 	if chunkIdxNew > chunkIdx {
-		if chunkIdxNew >= uint64(len(b.chunks)) {
+		if chunkIdxNew >= uint64(len(chunks)) {
 			idx = 0
 			idxNew = kvLen
 			chunkIdx = 0
@@ -340,14 +338,15 @@ func (b *bucket) Set(k, v []byte, h uint64) {
 			if b.gen&((1<<genSizeBits)-1) == 0 {
 				b.gen++
 			}
+			needClean = true
 		} else {
 			idx = chunkIdxNew * chunkSize
 			idxNew = idx + kvLen
 			chunkIdx = chunkIdxNew
 		}
-		b.chunks[chunkIdx] = b.chunks[chunkIdx][:0]
+		chunks[chunkIdx] = chunks[chunkIdx][:0]
 	}
-	chunk := b.chunks[chunkIdx]
+	chunk := chunks[chunkIdx]
 	if chunk == nil {
 		chunk = getChunk()
 		chunk = chunk[:0]
@@ -355,15 +354,19 @@ func (b *bucket) Set(k, v []byte, h uint64) {
 	chunk = append(chunk, kvLenBuf[:]...)
 	chunk = append(chunk, k...)
 	chunk = append(chunk, v...)
-	b.chunks[chunkIdx] = chunk
+	chunks[chunkIdx] = chunk
 	b.m[h] = idx | (b.gen << bucketSizeBits)
 	b.idx = idxNew
+	if needClean {
+		b.cleanLocked()
+	}
 	b.mu.Unlock()
 }
 
 func (b *bucket) Get(dst, k []byte, h uint64, returnDst bool) ([]byte, bool) {
 	atomic.AddUint64(&b.getCalls, 1)
 	found := false
+	chunks := b.chunks
 	b.mu.RLock()
 	v := b.m[h]
 	bGen := b.gen & ((1 << genSizeBits) - 1)
@@ -372,12 +375,12 @@ func (b *bucket) Get(dst, k []byte, h uint64, returnDst bool) ([]byte, bool) {
 		idx := v & ((1 << bucketSizeBits) - 1)
 		if gen == bGen && idx < b.idx || gen+1 == bGen && idx >= b.idx || gen == maxGen && bGen == 1 && idx >= b.idx {
 			chunkIdx := idx / chunkSize
-			if chunkIdx >= uint64(len(b.chunks)) {
+			if chunkIdx >= uint64(len(chunks)) {
 				// Corrupted data during the load from file. Just skip it.
 				atomic.AddUint64(&b.corruptions, 1)
 				goto end
 			}
-			chunk := b.chunks[chunkIdx]
+			chunk := chunks[chunkIdx]
 			idx %= chunkSize
 			if idx+4 >= chunkSize {
 				// Corrupted data during the load from file. Just skip it.
diff --git a/file.go b/file.go
index bab5484..dfbc070 100644
--- a/file.go
+++ b/file.go
@@ -272,7 +272,9 @@ func loadBuckets(buckets []bucket, dataPath string, maxChunks uint64) error {
 }
 
 func (b *bucket) Save(w io.Writer) error {
-	b.Clean()
+	b.mu.Lock()
+	b.cleanLocked()
+	b.mu.Unlock()
 
 	b.mu.RLock()
 	defer b.mu.RUnlock()
diff --git a/malloc_heap.go b/malloc_heap.go
index 79a7183..810d460 100644
--- a/malloc_heap.go
+++ b/malloc_heap.go
@@ -1,3 +1,4 @@
+//go:build appengine || windows
 // +build appengine windows
 
 package fastcache
diff --git a/malloc_mmap.go b/malloc_mmap.go
index e0cd0e7..e24d578 100644
--- a/malloc_mmap.go
+++ b/malloc_mmap.go
@@ -1,3 +1,4 @@
+//go:build !appengine && !windows
 // +build !appengine,!windows
 
 package fastcache
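
Note on the locking convention the patch adopts: cleanLocked no longer takes b.mu itself; it assumes the caller already holds the bucket lock, so Set can run the cleanup inside the critical section it already owns (and only when the generation counter wraps), while Save takes the lock explicitly before calling it. The sketch below is a minimal illustration of that caller-locked-method convention; it uses a hypothetical bucket type and a placeholder staleness check, not fastcache's actual data layout.

package main

import (
	"fmt"
	"sync"
)

// bucket is a hypothetical stand-in for the patched type: methods with a
// "Locked" suffix assume the caller already holds mu.
type bucket struct {
	mu sync.Mutex
	m  map[uint64]uint64
}

// cleanLocked drops stale entries. The caller must hold b.mu; the method
// itself never locks or unlocks, so it cannot deadlock when called from set.
func (b *bucket) cleanLocked() {
	for k, v := range b.m {
		if v == 0 { // placeholder staleness check for the sketch
			delete(b.m, k)
		}
	}
}

// set stores an entry and, only when asked, performs the cleanup inside the
// critical section it already owns, mirroring how the patched Set calls
// cleanLocked only when the generation counter wraps.
func (b *bucket) set(k, v uint64, needClean bool) {
	b.mu.Lock()
	b.m[k] = v
	if needClean {
		b.cleanLocked()
	}
	b.mu.Unlock()
}

func main() {
	b := &bucket{m: map[uint64]uint64{1: 0, 2: 7}}
	b.set(3, 9, true)     // cleanup runs under the same lock acquisition
	fmt.Println(len(b.m)) // 2: the stale entry under key 1 was removed
}

The same convention is what lets Save in file.go wrap the call in an explicit Lock/Unlock pair before re-acquiring the read lock for serialization.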