hugolib: Compare every element in pages cache

It is slightly slower, but correctness is, of course, more important:

```bash
benchmark                             old ns/op     new ns/op     delta
BenchmarkSortByWeightAndReverse-4     367           645           +75.75%

benchmark                             old allocs     new allocs     delta
BenchmarkSortByWeightAndReverse-4     2              2              +0.00%

benchmark                             old bytes     new bytes     delta
BenchmarkSortByWeightAndReverse-4     64            64            +0.00%
```
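To make the correctness issue concrete: the old `fastEqualPages` sampled only every `(len/10)`-th element for slices of 50 or more, so two slices differing at a skipped index were treated as equal and a stale cache entry could be returned. A minimal, self-contained sketch of that failure mode (using `[]int` as a stand-in for `Pages`):

```go
package main

import "fmt"

// sampledEqual mimics the old fastEqualPages: for slices of 50 or more
// elements, only every (len/10)-th element is compared.
func sampledEqual(a, b []int) bool {
	if len(a) != len(b) {
		return false
	}
	step := 1
	if len(a) >= 50 {
		step = len(a) / 10
	}
	for i := 0; i < len(a); i += step {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

// fullEqual compares every element, as pagesEqual now does.
func fullEqual(a, b []int) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

func main() {
	a := make([]int, 50)
	b := make([]int, 50)
	b[1] = 1 // differs at an index the step-5 sampling never visits

	fmt.Println(sampledEqual(a, b)) // true  – would be a wrong cache hit
	fmt.Println(fullEqual(a, b))    // false – the correct answer
}
```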

Running the same benchmark without any cache (i.e. re-sorting the slice on every iteration) and comparing it to the current version shows that the cache is still well worth it:

```bash
▶ benchcmp 2.bench 1.bench
benchmark                             old ns/op     new ns/op     delta
BenchmarkSortByWeightAndReverse-4     1358757       645           -99.95%

benchmark                             old allocs     new allocs     delta
BenchmarkSortByWeightAndReverse-4     17159          2              -99.99%

benchmark                             old bytes     new bytes     delta
BenchmarkSortByWeightAndReverse-4     274573        64            -99.98%
```
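For reference, the two `.bench` files used in this second comparison can be produced with something like the following; the exact flags and package path are assumptions, not part of the commit (producing `2.bench` requires locally disabling the cache lookup first):

```bash
# Current implementation (cache + full comparison).
go test -run NONE -bench BenchmarkSortByWeightAndReverse -benchmem ./hugolib > 1.bench

# Same benchmark after locally disabling the cache (re-sort on every iteration).
go test -run NONE -bench BenchmarkSortByWeightAndReverse -benchmem ./hugolib > 2.bench

# Compare the two runs (old vs. new).
benchcmp 2.bench 1.bench
```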

Closes gohugoio#5239

bep committed Sep 21, 2018
1 parent 16b26bf commit 04d67e3
Showing 4 changed files with 8 additions and 16 deletions.
16 changes: 4 additions & 12 deletions hugolib/pageCache.go
```diff
@@ -27,7 +27,7 @@ func (entry pageCacheEntry) matches(pageLists []Pages) bool {
 		return false
 	}
 	for i, p := range pageLists {
-		if !fastEqualPages(p, entry.in[i]) {
+		if !pagesEqual(p, entry.in[i]) {
 			return false
 		}
 	}
@@ -103,10 +103,8 @@ func (c *pageCache) getP(key string, apply func(p *Pages), pageLists ...Pages) (

 }

-// "fast" as in: we do not compare every element for big slices, but that is
-// good enough for our use cases.
-// TODO(bep) there is a similar method in pagination.go. DRY.
-func fastEqualPages(p1, p2 Pages) bool {
+// pagesEqual returns whether p1 and p2 are equal.
+func pagesEqual(p1, p2 Pages) bool {
 	if p1 == nil && p2 == nil {
 		return true
 	}
@@ -123,13 +121,7 @@ func fastEqualPages(p1, p2 Pages) bool {
 		return true
 	}

-	step := 1
-
-	if len(p1) >= 50 {
-		step = len(p1) / 10
-	}
-
-	for i := 0; i < len(p1); i += step {
+	for i := 0; i < len(p1); i++ {
 		if p1[i] != p2[i] {
 			return false
 		}
```
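Pieced together from the hunks above, the renamed helper now reads roughly as follows; the nil and length checks in the collapsed region are reconstructed from the surrounding context and may not match the file verbatim:

```go
// pagesEqual returns whether p1 and p2 are equal.
func pagesEqual(p1, p2 Pages) bool {
	if p1 == nil && p2 == nil {
		return true
	}

	if p1 == nil || p2 == nil {
		return false
	}

	if len(p1) != len(p2) {
		return false
	}

	if len(p1) == 0 {
		return true
	}

	// Compare every element; the old fastEqualPages sampled only every
	// (len(p1)/10)-th element for slices of 50 or more.
	for i := 0; i < len(p1); i++ {
		if p1[i] != p2[i] {
			return false
		}
	}

	return true
}
```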
4 changes: 2 additions & 2 deletions hugolib/pageCache_test.go
```diff
@@ -57,8 +57,8 @@ func TestPageCache(t *testing.T) {
 			l1.Unlock()
 			p2, c2 := c1.get("k1", nil, p)
 			assert.True(t, c2)
-			assert.True(t, fastEqualPages(p, p2))
-			assert.True(t, fastEqualPages(p, pages))
+			assert.True(t, pagesEqual(p, p2))
+			assert.True(t, pagesEqual(p, pages))
 			assert.NotNil(t, p)

 			l2.Lock()
```
2 changes: 1 addition & 1 deletion hugolib/pageSort_test.go
```diff
@@ -114,7 +114,7 @@ func TestPageSortReverse(t *testing.T) {
 	assert.Equal(t, 9, p2[0].fuzzyWordCount)
 	assert.Equal(t, 0, p2[9].fuzzyWordCount)
 	// cached
-	assert.True(t, fastEqualPages(p2, p1.Reverse()))
+	assert.True(t, pagesEqual(p2, p1.Reverse()))
 }

 func TestPageSortByParam(t *testing.T) {
```
2 changes: 1 addition & 1 deletion hugolib/pages_related.go
```diff
@@ -154,7 +154,7 @@ func newSearchIndexHandler(cfg related.Config) *relatedDocsHandler {
 // This assumes that a lock has been acquired.
 func (s *relatedDocsHandler) getIndex(p Pages) *related.InvertedIndex {
 	for _, ci := range s.postingLists {
-		if fastEqualPages(p, ci.p) {
+		if pagesEqual(p, ci.p) {
 			return ci.postingList
 		}
 	}
```
