Skip to content

Commit

Permalink
Consolidate most memory into a LRU cache
Browse files Browse the repository at this point in the history
This commit also consolidates all (or the most important) memory caches in Hugo.

Fixes gohugoio#7425
  • Loading branch information
bep committed Jun 27, 2020
1 parent 057b137 commit c6d29f7
Show file tree
Hide file tree
Showing 25 changed files with 577 additions and 366 deletions.
241 changes: 241 additions & 0 deletions cache/memcache/memcache.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,241 @@
// Copyright 2020 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package memcache provides the core memory cache used in Hugo.
package memcache

import (
"fmt"
"runtime"
"sync/atomic"
"time"

"github.com/BurntSushi/locker"
"github.com/gohugoio/hugo/helpers"
"github.com/karlseguin/ccache"
"github.com/pbnjay/memory"
)

const (
gigabyte = 1 << 30
)

// Cache configures a cache.
type Cache struct {
	conf  Config
	cache *ccache.LayeredCache // underlying two-level (primary/secondary key) LRU cache

	ttl     time.Duration  // lifetime given to entries stored via GetOrCreate
	nlocker *locker.Locker // per-key named locks serializing create calls in GetOrCreate

	stats *stats // memory/item bookkeeping used to shrink the cache under memory pressure
	stop  func() // stops the background check goroutine started via start() in New
}

// stats tracks memory readings and item counts for the cache.
type stats struct {
	memstatsStart   runtime.MemStats // snapshot taken when the cache was created
	memstatsCurrent runtime.MemStats // latest snapshot, refreshed by the periodic check loop
	maxSize         int64            // current size limit; halved each time memory runs low

	// This is an estimated/best guess value. TODO1 env factor.
	availableMemory uint64

	numItems uint64 // entries currently tracked; updated atomically via incr/decr
}

// isLowOnMemory reports whether the most recent allocation reading
// exceeds the memory budget.
func (st *stats) isLowOnMemory() bool {
	return st.memstatsCurrent.Alloc > st.availableMemory
}

// newMaxSize halves the current size limit, clamped to a floor of 20,
// and returns the new limit.
func (st *stats) newMaxSize() int64 {
	half := st.maxSize / 2
	if half < 20 {
		half = 20
	}
	st.maxSize = half
	return half
}

// incr atomically adds i to the item counter.
func (st *stats) incr(i int) {
	atomic.AddUint64(&st.numItems, uint64(i))
}

// decr atomically subtracts i from the item counter. ^uint64(i-1) is the
// two's-complement negation idiom documented for atomic.AddUint64.
func (st *stats) decr(i int) {
	atomic.AddUint64(&st.numItems, ^uint64(i-1))
}

// cacheEntry is the unit stored in the LRU cache: the created value (or the
// error from creating it) together with a size used for cache accounting.
type cacheEntry struct {
	size  int64
	value interface{}
	err   error
}

// Size returns the entry's accounting size; it satisfies the sizing
// interface expected by the underlying cache.
func (e cacheEntry) Size() int64 {
	return e.size
}

// Config configures the cache.
type Config struct {
	CheckInterval time.Duration // how often the background memory check runs (default 2s, see New)
	MaxSize       int64         // initial max cache size (default 1000, see New)
	ItemsToPrune  uint32        // pruning batch size passed through to ccache (default 200, see New)
	TTL           time.Duration // lifetime of entries stored by GetOrCreate (default 33s, see New)
}

// New creates a new cache with the given configuration. Zero-valued
// fields in conf are replaced with defaults. A background goroutine is
// started that periodically adjusts the cache size to memory pressure;
// call Stop to shut it down.
func New(conf Config) *Cache {
	if conf.TTL == 0 {
		conf.TTL = time.Second * 33
	}
	if conf.CheckInterval == 0 {
		conf.CheckInterval = time.Second * 2
	}
	if conf.MaxSize == 0 {
		conf.MaxSize = 1000
	}
	if conf.ItemsToPrune == 0 {
		conf.ItemsToPrune = 200
	}

	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	var availableMemory uint64

	// The total memory does not exclude memory used by other processes.
	// For now, let's say that Hugo can use a fraction of it.
	total := memory.TotalMemory()
	if total != 0 {
		availableMemory = total / 4
	} else {
		availableMemory = 2 * gigabyte
	}

	stats := &stats{
		memstatsStart: m,
		// Also seed the current reading; isLowOnMemory compares against
		// memstatsCurrent, which would otherwise be zero here and the
		// startup check below could never fire.
		memstatsCurrent: m,
		maxSize:         conf.MaxSize,
		availableMemory: availableMemory,
	}

	// Start smaller if we are already under memory pressure.
	if stats.isLowOnMemory() {
		conf.MaxSize = stats.newMaxSize()
	}

	c := &Cache{
		conf:    conf,
		cache:   ccache.Layered(ccache.Configure().MaxSize(conf.MaxSize).ItemsToPrune(conf.ItemsToPrune)),
		ttl:     conf.TTL,
		stats:   stats,
		nlocker: locker.NewLocker(),
	}

	c.stop = c.start()

	return c
}

// start launches the background goroutine that, on every CheckInterval
// tick, reconciles the item counter with evictions and shrinks the
// cache's max size when the process is low on memory. It returns a
// function that stops the goroutine (used as c.stop).
func (c *Cache) start() func() {
	ticker := time.NewTicker(c.conf.CheckInterval)
	quit := make(chan struct{})

	checkAndAdjustMaxSize := func() {
		var m runtime.MemStats
		// Items evicted by the underlying cache since the last check;
		// subtract them from our item counter.
		cacheDropped := c.GetDropped()
		c.stats.decr(cacheDropped)

		// Refresh the memory reading and halve the max size if over budget.
		runtime.ReadMemStats(&m)
		c.stats.memstatsCurrent = m
		if c.stats.isLowOnMemory() {
			c.cache.SetMaxSize(c.stats.newMaxSize())
		}

		// NOTE(review): this prints to stdout on every tick — looks like
		// leftover debug output; consider removing it or routing it
		// through a logger before release.
		fmt.Printf("\n\nAlloc = %v\nTotalAlloc = %v\nSys = %v\nNumGC = %v\nMemCacheDropped = %d\n\n", helpers.FormatByteCount(m.Alloc), helpers.FormatByteCount(m.TotalAlloc), helpers.FormatByteCount(m.Sys), m.NumGC, cacheDropped)

	}
	go func() {
		for {
			select {
			case <-ticker.C:
				checkAndAdjustMaxSize()
			case <-quit:
				ticker.Stop()
				return
			}
		}
	}()

	// Closing quit terminates the goroutine and stops the ticker.
	return func() {
		close(quit)
	}
}

// Clear resets the cache to an empty state, dropping all stored entries
// and discarding any named locks.
// This method is not thread safe.
func (c *Cache) Clear() {
	c.cache.Clear()
	c.nlocker = locker.NewLocker()
}

// Has reports whether a live (non-expired) value exists for the given keys.
func (c *Cache) Has(primary, secondary string) bool {
	v := c.cache.Get(primary, secondary)
	// ccache's Get also returns expired items; treat those as absent so
	// the configured TTL is honored.
	return v != nil && !v.Expired()
}

// Get returns the value stored for the given keys and whether it was
// found. Expired entries are treated as missing; without this check the
// configured TTL would never take effect on reads, since ccache's Get
// also returns expired items.
func (c *Cache) Get(primary, secondary string) (interface{}, bool) {
	v := c.cache.Get(primary, secondary)
	if v == nil || v.Expired() {
		return nil, false
	}
	return v.Value(), true
}

// DeleteAll removes every entry stored under the given primary key.
// It returns the bool from the underlying cache's DeleteAll —
// presumably whether the primary key existed; confirm against ccache docs.
func (c *Cache) DeleteAll(primary string) bool {
	return c.cache.DeleteAll(primary)
}

// Stop shuts the cache down: first the background memory-check
// goroutine (via the stop func returned by start), then the underlying
// cache. The order matters — the check loop reads c.cache.
func (c *Cache) Stop() {
	c.stop()
	c.cache.Stop()
}

// GetDropped returns the number of items dropped (evicted) by the
// underlying cache. The check loop in start treats this as a delta
// since the previous call when reconciling the item counter.
func (c *Cache) GetDropped() int {
	return c.cache.GetDropped()
}

// GetOrCreate tries to get the value with the given cache keys, if not found
// create will be called and the result — value or error — cached, so
// repeated failures for the same key do not hammer create.
// This method is thread safe.
func (c *Cache) GetOrCreate(primary, secondary string, create func() (interface{}, error)) (interface{}, error) {
	// Fast path: a live cached entry. ccache's Get also returns expired
	// items, so check Expired to honor the configured TTL.
	if v := c.cache.Get(primary, secondary); v != nil && !v.Expired() {
		entry := v.Value().(cacheEntry)
		return entry.value, entry.err
	}

	// The provided create function may be a relatively time consuming operation,
	// and there will in the common case be concurrent requests for the same key'd
	// resource, so make sure we pause these until the result is ready.
	// Note: distinct key pairs can concatenate to the same lock key
	// (e.g. "a"+"bc" vs "ab"+"c"); that only costs extra lock contention,
	// never a wrong cache entry, since the cache itself is keyed on both parts.
	key := primary + secondary
	c.nlocker.Lock(key)
	defer c.nlocker.Unlock(key)

	// Try again under the lock: another goroutine may have created it while
	// we were waiting.
	if v := c.cache.Get(primary, secondary); v != nil && !v.Expired() {
		entry := v.Value().(cacheEntry)
		return entry.value, entry.err
	}

	// Create it and store it in cache.
	value, err := create()

	c.cache.Set(primary, secondary, cacheEntry{value: value, err: err, size: 1}, c.ttl)
	c.stats.incr(1)

	return value, err
}
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright 2018 The Hugo Authors. All rights reserved.
// Copyright 2020 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
Expand All @@ -11,21 +11,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package namedmemcache
package memcache

import (
"fmt"
"sync"
"testing"
"time"

qt "github.com/frankban/quicktest"
)

func TestNamedCache(t *testing.T) {
func TestCache(t *testing.T) {
t.Parallel()
c := qt.New(t)

cache := New()
cache := New(Config{})

counter := 0
create := func() (interface{}, error) {
Expand All @@ -34,29 +35,29 @@ func TestNamedCache(t *testing.T) {
}

for i := 0; i < 5; i++ {
v1, err := cache.GetOrCreate("a1", create)
v1, err := cache.GetOrCreate("a", "a1", create)
c.Assert(err, qt.IsNil)
c.Assert(v1, qt.Equals, 1)
v2, err := cache.GetOrCreate("a2", create)
v2, err := cache.GetOrCreate("a", "a2", create)
c.Assert(err, qt.IsNil)
c.Assert(v2, qt.Equals, 2)
}

cache.Clear()

v3, err := cache.GetOrCreate("a2", create)
v3, err := cache.GetOrCreate("a", "a2", create)
c.Assert(err, qt.IsNil)
c.Assert(v3, qt.Equals, 3)
}

func TestNamedCacheConcurrent(t *testing.T) {
func TestCacheConcurrent(t *testing.T) {
t.Parallel()

c := qt.New(t)

var wg sync.WaitGroup

cache := New()
cache := New(Config{})

create := func(i int) func() (interface{}, error) {
return func() (interface{}, error) {
Expand All @@ -70,11 +71,46 @@ func TestNamedCacheConcurrent(t *testing.T) {
defer wg.Done()
for j := 0; j < 100; j++ {
id := fmt.Sprintf("id%d", j)
v, err := cache.GetOrCreate(id, create(j))
v, err := cache.GetOrCreate("a", id, create(j))
c.Assert(err, qt.IsNil)
c.Assert(v, qt.Equals, j)
}
}()
}
wg.Wait()
}

func TestCacheMemStats(t *testing.T) {
t.Parallel()
c := qt.New(t)

cache := New(Config{
ItemsToPrune: 10,
CheckInterval: 500 * time.Millisecond,
})

s := cache.stats

c.Assert(s.memstatsStart.Alloc > 0, qt.Equals, true)
c.Assert(s.memstatsCurrent.Alloc, qt.Equals, uint64(0))
c.Assert(s.availableMemory > 0, qt.Equals, true)
c.Assert(s.numItems, qt.Equals, uint64(0))

counter := 0
create := func() (interface{}, error) {
counter++
return counter, nil
}

for i := 1; i <= 20; i++ {
_, err := cache.GetOrCreate("a", fmt.Sprintf("b%d", i), create)
c.Assert(err, qt.IsNil)
}

c.Assert(s.numItems, qt.Equals, uint64(20))
cache.cache.SetMaxSize(10)
time.Sleep(time.Millisecond * 600)
c.Assert(int(s.numItems), qt.Equals, 10)

c.Assert(s.memstatsCurrent.Alloc > 0, qt.Equals, true)
}
Loading

0 comments on commit c6d29f7

Please sign in to comment.