Added benchmarks to AdaptivePool
```shell
$ go test -run=- -bench=AdaptivePool -count=20 | benchstat -col=/implem,/method -
goos: linux
goarch: amd64
pkg: github.com/diegommm/adaptivepool
cpu: 13th Gen Intel(R) Core(TM) i7-13700H
                                            │              sync.Pool              │                                   AdaptivePool                                    │
                                            │     Get      │         Put          │                 Get                 │          Put          │     GetWithCost     │
                                            │    sec/op    │    sec/op      vs base   │    sec/op     vs base               │     sec/op      vs base   │    sec/op     vs base   │
AdaptivePool/created=true-20                  49.09n ±  8%                          51.31n ± 17%  +4.53% (p=0.022 n=20)
AdaptivePool/created=false-20                 10.26n ± 18%                          10.93n ± 16%  +6.53% (p=0.033 n=20)
AdaptivePool/accepted=true-20                                 15.98n ±  6%                                                  41.29n ± 10%
AdaptivePool/accepted=false-20                               0.8268n ± 20%                                                26.6200n ± 12%
AdaptivePool/created=true/fromPool=true-20                                                                                                        12.33n ± 17%
AdaptivePool/created=true/fromPool=false-20                                                                                                       47.04n ± 11%
AdaptivePool/created=false/fromPool=true-20                                                                                                       11.58n ± 14%
geomean                                       22.44n          3.634n        ? ¹ ²   23.68n        +5.53%                    33.15n        ? ¹ ²   18.87n        ? ¹ ²
¹ benchmark set differs from baseline; geomeans may not be comparable
² ratios must be >0 to compute geomean

                                            │             sync.Pool              │                                   AdaptivePool                                   │
                                            │     Get      │         Put         │                 Get                 │         Put          │     GetWithCost     │
                                            │     B/op     │    B/op     vs base │    B/op     vs base                 │    B/op      vs base │    B/op     vs base │
AdaptivePool/created=true-20                  0.000 ± 0%                           0.000 ± 0%       ~ (p=1.000 n=20) ¹
AdaptivePool/created=false-20                 0.000 ± 0%                           0.000 ± 0%       ~ (p=1.000 n=20) ¹
AdaptivePool/accepted=true-20                                24.00 ± 4%                                                  17.00 ± 12%
AdaptivePool/accepted=false-20                               0.000 ± 0%                                                  0.000 ±  0%
AdaptivePool/created=true/fromPool=true-20                                                                                                      0.000 ± 0%
AdaptivePool/created=true/fromPool=false-20                                                                                                     0.000 ± 0%
AdaptivePool/created=false/fromPool=true-20                                                                                                     0.000 ± 0%
geomean                                                  ²               ? ³ ² ⁴               +0.00%                ²                ? ³ ² ⁴               ? ³ ² ⁴
¹ all samples are equal
² summaries must be >0 to compute geomean
³ benchmark set differs from baseline; geomeans may not be comparable
⁴ ratios must be >0 to compute geomean

                                            │             sync.Pool              │                                  AdaptivePool                                   │
                                            │     Get      │         Put         │                 Get                 │         Put         │     GetWithCost     │
                                            │  allocs/op   │ allocs/op   vs base │ allocs/op   vs base                 │ allocs/op   vs base │ allocs/op   vs base │
AdaptivePool/created=true-20                  0.000 ± 0%                           0.000 ± 0%       ~ (p=1.000 n=20) ¹
AdaptivePool/created=false-20                 0.000 ± 0%                           0.000 ± 0%       ~ (p=1.000 n=20) ¹
AdaptivePool/accepted=true-20                                0.000 ± 0%                                                  0.000 ± 0%
AdaptivePool/accepted=false-20                               0.000 ± 0%                                                  0.000 ± 0%
AdaptivePool/created=true/fromPool=true-20                                                                                                     0.000 ± 0%
AdaptivePool/created=true/fromPool=false-20                                                                                                    0.000 ± 0%
AdaptivePool/created=false/fromPool=true-20                                                                                                    0.000 ± 0%
geomean                                                  ²               ? ³ ² ⁴               +0.00%                ²               ? ³ ² ⁴               ? ³ ² ⁴
¹ all samples are equal
² summaries must be >0 to compute geomean
³ benchmark set differs from baseline; geomeans may not be comparable
⁴ ratios must be >0 to compute geomean

                              │  sync.Pool  │         AdaptivePool          │
                              │     Get     │              Get              │
                              │    valid    │    valid     vs base          │
AdaptivePool/created=false-20   100.0M ± 0%   100.0M ± 0%  ~ (p=0.241 n=20)
```
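
The /implem and /method column groups in the tables above come from key=value segments in the sub-benchmark names, which `benchstat -col=/implem,/method -` pivots into columns. A minimal, hypothetical sketch of that naming convention (the benchmark name and file are illustrative, not part of this commit):

```go
package adaptivepool_test

import "testing"

// Each key=value segment in a sub-benchmark name becomes a benchstat
// dimension; `benchstat -col=/implem,/method -` then groups the columns
// by the /implem and /method keys, as in the tables above.
func BenchmarkNamingExample(b *testing.B) {
	b.Run("implem=sync.Pool/method=Get", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			_ = i // placeholder for the measured work
		}
	})
}
```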
diegommm committed Oct 28, 2024
1 parent 25993e7 commit f617cad
Showing 2 changed files with 177 additions and 7 deletions.
12 changes: 5 additions & 7 deletions adaptive_pool.go
@@ -174,13 +174,11 @@ func (p *AdaptivePool[T]) Get() T {
 // GetWithCost returns a new object with the specified cost from the pool,
 // allocating it from the ItemProvider if needed.
 func (p *AdaptivePool[T]) GetWithCost(cost int) T {
-	if v := p.pool.Get(); v != nil {
-		// if the item we got from the pool is smaller than needed, drop it for
-		// garbage collection and instead directly allocate a new one with the
-		// appropriate cost
-		if ret := v.(T); p.provider.Costof(ret) >= cost {
-			return ret
-		}
+	// if the item we got from the pool is smaller than needed, drop it for
+	// garbage collection and instead directly allocate a new one with the
+	// appropriate cost
+	if v, ok := p.pool.Get().(T); ok && p.provider.Costof(v) >= cost {
+		return v
 	}
 	return p.provider.New(cost)
 }
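
For context, here is a minimal, hypothetical provider sketch (not part of this commit) that mirrors the ItemProvider method set exercised by nopProvider in the benchmark file below, using an item's capacity as its cost. With a provider like this, the refactored GetWithCost reuses a pooled slice only when Costof(v) >= cost, and otherwise drops the pooled item and allocates through New.

```go
// Hypothetical example inside package adaptivepool, assuming the
// ItemProvider[T] method set shown by nopProvider in the benchmarks below.
type byteSliceProvider struct{}

// Costof is what GetWithCost compares against the requested cost.
func (byteSliceProvider) Costof(v []byte) int { return cap(v) }

// New is called when no pooled item is large enough for the requested cost.
func (byteSliceProvider) New(prealloc int) []byte { return make([]byte, 0, prealloc) }

// Reset prepares an item before it is returned to the pool.
func (byteSliceProvider) Reset(v []byte) []byte { return v[:0] }

var _ ItemProvider[[]byte] = byteSliceProvider{}
```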
172 changes: 172 additions & 0 deletions adaptive_pool_bench_test.go
@@ -0,0 +1,172 @@
package adaptivepool

import (
	"sync"
	"testing"
)

type nopProvider struct{}

func (p nopProvider) Costof(v int) int { return 1 }
func (p nopProvider) New(prealloc int) int { return 1 }
func (p nopProvider) Reset(v int) int { return 0 }

var _ ItemProvider[int] = nopProvider{}

type nopEstimator struct {
	accept bool
}

func (nopEstimator) Suggest(EstimatorStats) int { return 1 }
func (e nopEstimator) Accept(s EstimatorStats, ct int) bool { return e.accept }

var _ Estimator = nopEstimator{}

func BenchmarkAdaptivePool(b *testing.B) {
	// For consistency with previous commits, consider running this benchmark
	// like this:
	//	go test -run=- -bench=AdaptivePool -count=20 |
	//		benchstat -col=/implem,/method -

	// For the Get* methods we do our best to add enough items before the
	// actual benchmark starts, but there is no guarantee that all of them
	// will still be available by the last iterations, because sync.Pool is
	// free to drop them. Hence, some error is possible in those cases. For
	// that reason we report an extra metric called "valid" where we can,
	// although in some cases even that is not possible. Judging by the
	// results of the benchmarks that do report "valid", sync.Pool does not
	// appear to evict any of the items we pre-Put into it, probably because
	// they are Put so close together. The `if` and the counter increment
	// could also introduce a distortion, but it should be negligible.

b.Run("implem=sync.Pool/method=Get/created=true", func(b *testing.B) {
p := &sync.Pool{
New: func() any {
return 1
},
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
p.Get()
}
})

b.Run("implem=sync.Pool/method=Get/created=false", func(b *testing.B) {
p := new(sync.Pool)
for i := 0; i < b.N; i++ {
p.Put(1)

Check failure on line 59 in adaptive_pool_bench_test.go

View workflow job for this annotation

GitHub Actions / Lint

SA6002: argument should be pointer-like to avoid allocations (staticcheck)
}
var valid int
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
if p.Get() != nil {
valid++
}
}
b.ReportMetric(float64(valid), "valid")
})

b.Run("implem=sync.Pool/method=Put/accepted=true", func(b *testing.B) {
p := new(sync.Pool)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
p.Put(1)

Check failure on line 77 in adaptive_pool_bench_test.go

View workflow job for this annotation

GitHub Actions / Lint

SA6002: argument should be pointer-like to avoid allocations (staticcheck)
}
})

b.Run("implem=sync.Pool/method=Put/accepted=false", func(b *testing.B) {
p := new(sync.Pool)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
p.Put(nil)
}
})

b.Run("implem=AdaptivePool/method=Get/created=true", func(b *testing.B) {
ap := New(nopProvider{}, nopEstimator{true}, 500)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
ap.Get()
}
})

b.Run("implem=AdaptivePool/method=Get/created=false", func(b *testing.B) {
ap := New(nopProvider{}, nopEstimator{true}, 500)
for i := 0; i < b.N; i++ {
ap.Put(2)
}
var valid int
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
if ap.Get() == 2 {
valid++
}
}
b.ReportMetric(float64(valid), "valid")
})

b.Run("implem=AdaptivePool/method=GetWithCost/created=true/fromPool=true",
func(b *testing.B) {
ap := New(nopProvider{}, nopEstimator{true}, 500)
for i := 0; i < b.N; i++ {
ap.Put(1)
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
ap.GetWithCost(2)
}
// we don't have a way to report the "valid" metric here
})

b.Run("implem=AdaptivePool/method=GetWithCost/created=true/fromPool=false",
func(b *testing.B) {
ap := New(nopProvider{}, nopEstimator{true}, 500)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
ap.GetWithCost(2)
}
})

b.Run("implem=AdaptivePool/method=GetWithCost/created=false/fromPool=true",
func(b *testing.B) {
ap := New(nopProvider{}, nopEstimator{true}, 500)
for i := 0; i < b.N; i++ {
ap.Put(1)
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
ap.GetWithCost(1)
}
// we don't have a way to report the "valid" metric here
})

b.Run("implem=AdaptivePool/method=Put/accepted=true",
func(b *testing.B) {
ap := New(nopProvider{}, nopEstimator{true}, 500)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
ap.Put(1)
}
})

b.Run("implem=AdaptivePool/method=Put/accepted=false",
func(b *testing.B) {
ap := New(nopProvider{}, nopEstimator{false}, 500)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
ap.Put(1)
}
})
}
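
On the two SA6002 findings flagged above: staticcheck warns because putting a non-pointer value such as `1` into a sync.Pool converts it to an interface, which in general requires an allocation (small integers are cached by the runtime, which is consistent with the 0 allocs/op reported for Put above). The usual way to satisfy the check is to pool pointer-like values instead. A minimal, hypothetical sketch of that pattern, not part of this commit:

```go
package main

import (
	"fmt"
	"sync"
)

// Pooling a pointer keeps the interface conversion allocation-free,
// which is what staticcheck's SA6002 check asks for.
var intPool = sync.Pool{
	New: func() any { return new(int) },
}

func main() {
	p := intPool.Get().(*int)
	*p = 42
	fmt.Println(*p)
	intPool.Put(p) // a *int is pointer-like, so Put does not box a value
}
```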
