-
Notifications
You must be signed in to change notification settings - Fork 1
/
benchmark_test.go
145 lines (131 loc) · 3.34 KB
/
benchmark_test.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
package sc
import (
"context"
"testing"
"time"
)
// BenchmarkCache_Single_SameKey measures serial Get throughput when every
// request hits the same key — the best-case, always-hot access pattern.
func BenchmarkCache_Single_SameKey(b *testing.B) {
	for _, c := range allCaches(10) {
		c := c // capture range variable (pre-Go 1.22 loop semantics)
		b.Run(c.name, func(b *testing.B) {
			replaceFn := func(ctx context.Context, key string) (string, error) {
				return "value", nil
			}
			cache, err := New[string, string](replaceFn, 1*time.Minute, 1*time.Minute, c.cacheOpts...)
			if err != nil {
				// Fatal, not Error: continuing with a nil cache would panic below.
				b.Fatal(err)
			}
			ctx := context.Background()
			// The benchmark timer is already running at this point, so the
			// original b.StartTimer() was a no-op; ResetTimer properly
			// excludes the setup above from the measurement.
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				_, _ = cache.Get(ctx, "key")
			}
			b.Log(cache.Stats())
		})
	}
}
// BenchmarkCache_Single_Zipfian measures serial Get throughput with keys
// drawn from a Zipfian distribution over a keyspace 4x the cache size,
// so some requests miss and trigger the replace function.
func BenchmarkCache_Single_Zipfian(b *testing.B) {
	const (
		size = 1000  // cache capacity
		s    = 1.001 // Zipf skew parameter
		v    = 100   // Zipf value parameter
	)
	for _, c := range allCaches(size) {
		c := c // capture range variable (pre-Go 1.22 loop semantics)
		b.Run(c.name, func(b *testing.B) {
			replaceFn := func(ctx context.Context, key string) (string, error) {
				return "value", nil
			}
			cache, err := New[string, string](replaceFn, 1*time.Minute, 1*time.Minute, c.cacheOpts...)
			if err != nil {
				// Fatal, not Error: continuing with a nil cache would panic below.
				b.Fatal(err)
			}
			ctx := context.Background()
			// Pre-generate the key sequence so key generation is not timed.
			keys := newKeys(newZipfian(s, v, size*4), size*10)
			// The timer is already running; ResetTimer (rather than the
			// no-op StartTimer) excludes cache construction and key
			// generation from the measurement.
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				_, _ = cache.Get(ctx, keys[i%(size*10)])
			}
			b.Log(cache.Stats())
		})
	}
}
// BenchmarkCache_Parallel_SameKey measures concurrent Get throughput when
// all goroutines contend on a single hot key.
func BenchmarkCache_Parallel_SameKey(b *testing.B) {
	for _, c := range allCaches(10) {
		c := c // capture range variable (pre-Go 1.22 loop semantics)
		b.Run(c.name, func(b *testing.B) {
			replaceFn := func(ctx context.Context, key string) (string, error) {
				return "value", nil
			}
			cache, err := New[string, string](replaceFn, 1*time.Minute, 1*time.Minute, c.cacheOpts...)
			if err != nil {
				// Fatal, not Error: continuing with a nil cache would panic below.
				b.Fatal(err)
			}
			ctx := context.Background()
			// Exclude cache construction from the timed region.
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				for pb.Next() {
					_, _ = cache.Get(ctx, "key")
				}
			})
			b.Log(cache.Stats())
		})
	}
}
// BenchmarkCache_Parallel_Zipfian measures concurrent Get throughput with
// Zipfian-distributed keys over a keyspace 4x the cache size. Each parallel
// worker uses its own key generator to avoid cross-goroutine contention on
// the random source.
func BenchmarkCache_Parallel_Zipfian(b *testing.B) {
	const (
		size = 1000  // cache capacity
		s    = 1.001 // Zipf skew parameter
		v    = 100   // Zipf value parameter
	)
	for _, c := range allCaches(size) {
		c := c // capture range variable (pre-Go 1.22 loop semantics)
		b.Run(c.name, func(b *testing.B) {
			replaceFn := func(ctx context.Context, key string) (string, error) {
				return "value", nil
			}
			cache, err := New[string, string](replaceFn, 1*time.Minute, 1*time.Minute, c.cacheOpts...)
			if err != nil {
				// Fatal, not Error: continuing with a nil cache would panic below.
				b.Fatal(err)
			}
			ctx := context.Background()
			// Exclude cache construction from the timed region.
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				// Per-goroutine generator; created once per worker.
				nextKey := newZipfian(s, v, size*4)
				for pb.Next() {
					_, _ = cache.Get(ctx, nextKey())
				}
			})
			b.Log(cache.Stats())
		})
	}
}
// BenchmarkCache_RealWorkLoad benchmarks caches with simulated real-world load:
// Zipfian-distributed keys and a replace func that takes 1ms to load, with
// short freshness/expiry windows (100ms/200ms) so entries actually turn over.
func BenchmarkCache_RealWorkLoad(b *testing.B) {
	const (
		size = 1000  // cache capacity
		s    = 1.001 // Zipf skew parameter
		v    = 100   // Zipf value parameter
	)
	// Only benchmark against evicting caches (not the built-in map backend) because the map backend can cache all values.
	for _, c := range evictingCaches(size) {
		c := c // capture range variable (pre-Go 1.22 loop semantics)
		b.Run(c.name, func(b *testing.B) {
			replaceFn := func(ctx context.Context, key string) (string, error) {
				time.Sleep(1 * time.Millisecond) // simulate some value that takes 1ms to load
				return "value", nil
			}
			cache, err := New[string, string](replaceFn, 100*time.Millisecond, 200*time.Millisecond, c.cacheOpts...)
			if err != nil {
				// Fatal, not Error: continuing with a nil cache would panic below.
				b.Fatal(err)
			}
			ctx := context.Background()
			// Exclude cache construction from the timed region.
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				// Per-goroutine generator; created once per worker.
				nextKey := newZipfian(s, v, size*4)
				for pb.Next() {
					_, _ = cache.Get(ctx, nextKey())
				}
			})
			b.Log(cache.Stats())
		})
	}
}