Consolidate most memory into an LRU cache
This commit also consolidates all (or the most important) memory caches in Hugo. Fixes gohugoio#7425
Showing 25 changed files with 577 additions and 366 deletions.
@@ -0,0 +1,241 @@
// Copyright 2020 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package memcache provides the core memory cache used in Hugo.
package memcache

import (
	"fmt"
	"runtime"
	"sync/atomic"
	"time"

	"github.com/BurntSushi/locker"
	"github.com/gohugoio/hugo/helpers"
	"github.com/karlseguin/ccache"
	"github.com/pbnjay/memory"
)

const (
	gigabyte = 1 << 30
)

// Cache is a layered LRU cache with a TTL.
type Cache struct {
	conf  Config
	cache *ccache.LayeredCache

	ttl     time.Duration
	nlocker *locker.Locker

	stats *stats
	stop  func()
}

type stats struct {
	memstatsStart   runtime.MemStats
	memstatsCurrent runtime.MemStats
	maxSize         int64

	// This is an estimated/best guess value. TODO1 env factor.
	availableMemory uint64

	numItems uint64
}

func (s *stats) isLowOnMemory() bool {
	return s.memstatsCurrent.Alloc > s.availableMemory
}

func (s *stats) newMaxSize() int64 {
	s.maxSize = s.maxSize / 2
	if s.maxSize < 20 {
		s.maxSize = 20
	}
	return s.maxSize
}

func (s *stats) incr(i int) {
	atomic.AddUint64(&s.numItems, uint64(i))
}

func (s *stats) decr(i int) {
	atomic.AddUint64(&s.numItems, ^uint64(i-1))
}

type cacheEntry struct {
	size  int64
	value interface{}
	err   error
}

func (c cacheEntry) Size() int64 {
	return c.size
}

// Config configures the cache.
type Config struct {
	CheckInterval time.Duration
	MaxSize       int64
	ItemsToPrune  uint32
	TTL           time.Duration
}

// New creates a new cache.
func New(conf Config) *Cache {
	if conf.TTL == 0 {
		conf.TTL = time.Second * 33
	}
	if conf.CheckInterval == 0 {
		conf.CheckInterval = time.Second * 2
	}
	if conf.MaxSize == 0 {
		conf.MaxSize = 1000
	}
	if conf.ItemsToPrune == 0 {
		conf.ItemsToPrune = 200
	}

	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	var availableMemory uint64

	// The total memory does not exclude memory used by other processes.
	// For now, let's say that Hugo can use a fraction of it.
	total := memory.TotalMemory()
	if total != 0 {
		availableMemory = total / 4
	} else {
		availableMemory = 2 * gigabyte
	}

	stats := &stats{
		memstatsStart:   m,
		maxSize:         conf.MaxSize,
		availableMemory: availableMemory,
	}

	if stats.isLowOnMemory() {
		conf.MaxSize = stats.newMaxSize()
	}

	c := &Cache{
		conf:    conf,
		cache:   ccache.Layered(ccache.Configure().MaxSize(conf.MaxSize).ItemsToPrune(conf.ItemsToPrune)),
		ttl:     conf.TTL,
		stats:   stats,
		nlocker: locker.NewLocker(),
	}

	c.stop = c.start()

	return c
}

func (c *Cache) start() func() {
	ticker := time.NewTicker(c.conf.CheckInterval)
	quit := make(chan struct{})

	checkAndAdjustMaxSize := func() {
		var m runtime.MemStats
		cacheDropped := c.GetDropped()
		c.stats.decr(cacheDropped)

		runtime.ReadMemStats(&m)
		c.stats.memstatsCurrent = m
		if c.stats.isLowOnMemory() {
			c.cache.SetMaxSize(c.stats.newMaxSize())
		}

		fmt.Printf("\n\nAlloc = %v\nTotalAlloc = %v\nSys = %v\nNumGC = %v\nMemCacheDropped = %d\n\n", helpers.FormatByteCount(m.Alloc), helpers.FormatByteCount(m.TotalAlloc), helpers.FormatByteCount(m.Sys), m.NumGC, cacheDropped)
	}
	go func() {
		for {
			select {
			case <-ticker.C:
				checkAndAdjustMaxSize()
			case <-quit:
				ticker.Stop()
				return
			}
		}
	}()

	return func() {
		close(quit)
	}
}

// Clear clears the cache state.
// This method is not thread safe.
func (c *Cache) Clear() {
	c.nlocker = locker.NewLocker()
	c.cache.Clear()
}

// Has reports whether the given key pair is in the cache.
func (c *Cache) Has(primary, secondary string) bool {
	return c.cache.Get(primary, secondary) != nil
}

// Get returns the cached item for the given key pair, if any.
func (c *Cache) Get(primary, secondary string) (interface{}, bool) {
	v := c.cache.Get(primary, secondary)
	if v == nil {
		return nil, false
	}
	return v.Value(), true
}

// DeleteAll deletes all entries for the given primary key.
func (c *Cache) DeleteAll(primary string) bool {
	return c.cache.DeleteAll(primary)
}

// Stop stops the cache's background workers.
func (c *Cache) Stop() {
	c.stop()
	c.cache.Stop()
}

// GetDropped returns the number of items dropped from the cache since the last call to GetDropped.
func (c *Cache) GetDropped() int {
	return c.cache.GetDropped()
}

// GetOrCreate tries to get the value with the given cache keys; if not found,
// create will be called and the result cached.
// This method is thread safe.
func (c *Cache) GetOrCreate(primary, secondary string, create func() (interface{}, error)) (interface{}, error) {
	if v := c.cache.Get(primary, secondary); v != nil {
		entry := v.Value().(cacheEntry)
		return entry.value, entry.err
	}

	// The provided create function may be a relatively time consuming operation,
	// and there will in the common case be concurrent requests for the same keyed
	// resource, so make sure we pause these until the result is ready.
	key := primary + secondary
	c.nlocker.Lock(key)
	defer c.nlocker.Unlock(key)

	// Try again.
	if v := c.cache.Get(primary, secondary); v != nil {
		entry := v.Value().(cacheEntry)
		return entry.value, entry.err
	}

	// Create it and store it in cache.
	value, err := create()

	c.cache.Set(primary, secondary, cacheEntry{value: value, err: err, size: 1}, c.ttl)
	c.stats.incr(1)

	return value, err
}