From b6af2d26317072c142c3cede31561670e3f224aa Mon Sep 17 00:00:00 2001 From: ykadowak Date: Thu, 30 Nov 2023 02:36:45 +0000 Subject: [PATCH] Revert "Add gache's generic Map as internal/sync.Map and replace standard sync.Map with it (#2115)" This reverts commit e52eb4a1f88db078a54fa0e7080982f45ccf064c. --- go.mod | 4 +- internal/circuitbreaker/manager.go | 17 +- .../client/v1/client/discoverer/discover.go | 4 +- .../client/v1/client/filter/egress/client.go | 4 +- .../client/v1/client/filter/ingress/client.go | 4 +- internal/net/dialer.go | 12 +- internal/net/grpc/client.go | 15 +- internal/net/grpc/pool/pool_bench_test.go | 9 +- internal/singleflight/singleflight.go | 7 +- internal/sync/map.go | 7 - pkg/agent/core/ngt/service/kvs/kvs.go | 43 +- pkg/agent/core/ngt/service/kvs/kvs_test.go | 169 +++-- pkg/agent/core/ngt/service/kvs/ou.go | 273 +++++++ pkg/agent/core/ngt/service/kvs/ou_test.go | 673 ++++++++++++++++++ pkg/agent/core/ngt/service/kvs/uo.go | 272 +++++++ pkg/agent/core/ngt/service/kvs/uo_test.go | 673 ++++++++++++++++++ pkg/agent/core/ngt/service/vqueue/indexmap.go | 279 ++++++++ .../core/ngt/service/vqueue/indexmap_test.go | 664 +++++++++++++++++ pkg/agent/core/ngt/service/vqueue/queue.go | 3 +- pkg/discoverer/k8s/service/discover.go | 9 +- pkg/discoverer/k8s/service/nodemap.go | 404 +++++++++++ pkg/discoverer/k8s/service/nodemap_test.go | 664 +++++++++++++++++ pkg/discoverer/k8s/service/nodemetricsmap.go | 392 ++++++++++ .../k8s/service/nodemetricsmap_test.go | 553 ++++++++++++++ pkg/discoverer/k8s/service/podmetricsmap.go | 392 ++++++++++ .../k8s/service/podmetricsmap_test.go | 553 ++++++++++++++ pkg/discoverer/k8s/service/podsmap.go | 392 ++++++++++ pkg/discoverer/k8s/service/podsmap_test.go | 553 ++++++++++++++ pkg/gateway/lb/handler/grpc/aggregation.go | 5 +- pkg/gateway/lb/service/gateway.go | 4 +- pkg/manager/index/service/indexer.go | 8 +- pkg/manager/index/service/indexinfos.go | 212 ++++++ pkg/manager/index/service/indexinfos_test.go | 439 
++++++++++++ 33 files changed, 7538 insertions(+), 174 deletions(-) delete mode 100644 internal/sync/map.go create mode 100644 pkg/agent/core/ngt/service/kvs/ou.go create mode 100644 pkg/agent/core/ngt/service/kvs/ou_test.go create mode 100644 pkg/agent/core/ngt/service/kvs/uo.go create mode 100644 pkg/agent/core/ngt/service/kvs/uo_test.go create mode 100644 pkg/agent/core/ngt/service/vqueue/indexmap.go create mode 100644 pkg/agent/core/ngt/service/vqueue/indexmap_test.go create mode 100644 pkg/discoverer/k8s/service/nodemap.go create mode 100644 pkg/discoverer/k8s/service/nodemap_test.go create mode 100644 pkg/discoverer/k8s/service/nodemetricsmap.go create mode 100644 pkg/discoverer/k8s/service/nodemetricsmap_test.go create mode 100644 pkg/discoverer/k8s/service/podmetricsmap.go create mode 100644 pkg/discoverer/k8s/service/podmetricsmap_test.go create mode 100644 pkg/discoverer/k8s/service/podsmap.go create mode 100644 pkg/discoverer/k8s/service/podsmap_test.go create mode 100644 pkg/manager/index/service/indexinfos.go create mode 100644 pkg/manager/index/service/indexinfos_test.go diff --git a/go.mod b/go.mod index c62c4f9d02..ed8ee670dc 100755 --- a/go.mod +++ b/go.mod @@ -362,7 +362,7 @@ require ( github.com/klauspost/compress v1.15.9 github.com/kpango/fastime v1.1.9 github.com/kpango/fuid v0.0.0-00010101000000-000000000000 - github.com/kpango/gache/v2 v2.0.9 + github.com/kpango/gache/v2 v2.0.0-00010101000000-000000000000 github.com/kpango/glg v1.6.15 github.com/leanovate/gopter v0.0.0-00010101000000-000000000000 github.com/lucasb-eyer/go-colorful v0.0.0-00010101000000-000000000000 @@ -457,7 +457,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/klauspost/cpuid/v2 v2.2.4 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson 
v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect diff --git a/internal/circuitbreaker/manager.go b/internal/circuitbreaker/manager.go index 0cae88a809..a7846b9aaa 100644 --- a/internal/circuitbreaker/manager.go +++ b/internal/circuitbreaker/manager.go @@ -20,7 +20,6 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" - valdsync "github.com/vdaas/vald/internal/sync" ) // NOTE: This variable is for observability package. @@ -37,7 +36,7 @@ type CircuitBreaker interface { } type breakerManager struct { - m valdsync.Map[string, *breaker] + m sync.Map // breaker group. key: string, value: *breaker. opts []BreakerOption } @@ -73,16 +72,24 @@ func (bm *breakerManager) Do(ctx context.Context, key string, fn func(ctx contex mu.Unlock() }() + var br *breaker // Pre-loading to prevent a lot of object generation. - br, ok := bm.m.Load(key) + obj, ok := bm.m.Load(key) if !ok { br, err = newBreaker(key, bm.opts...) if err != nil { return nil, err } - br, _ = bm.m.LoadOrStore(key, br) + obj, _ = bm.m.LoadOrStore(key, br) + } + br, ok = obj.(*breaker) + if !ok { + br, err = newBreaker(key, bm.opts...) 
+ if err != nil { + return nil, err + } + bm.m.Store(key, br) } - val, st, err = br.do(ctx, fn) if err != nil { switch st { diff --git a/internal/client/v1/client/discoverer/discover.go b/internal/client/v1/client/discoverer/discover.go index f4f2e6e8e1..c553219c93 100644 --- a/internal/client/v1/client/discoverer/discover.go +++ b/internal/client/v1/client/discoverer/discover.go @@ -20,6 +20,7 @@ package discoverer import ( "context" "reflect" + "sync" "sync/atomic" "time" @@ -31,7 +32,6 @@ import ( "github.com/vdaas/vald/internal/net" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/safety" - valdsync "github.com/vdaas/vald/internal/sync" ) type Client interface { @@ -344,7 +344,7 @@ func (c *client) disconnectOldAddrs(ctx context.Context, oldAddrs, connectedAddr if !c.autoconn { return nil } - var cur valdsync.Map[string, any] // TODO: Does this have to be a sync.Map not a map? + var cur sync.Map for _, addr := range connectedAddrs { cur.Store(addr, struct{}{}) } diff --git a/internal/client/v1/client/filter/egress/client.go b/internal/client/v1/client/filter/egress/client.go index 77ea9793cf..7c1f943102 100644 --- a/internal/client/v1/client/filter/egress/client.go +++ b/internal/client/v1/client/filter/egress/client.go @@ -20,6 +20,7 @@ package egress import ( "context" "reflect" + "sync" "github.com/vdaas/vald/apis/grpc/v1/filter/egress" "github.com/vdaas/vald/apis/grpc/v1/payload" @@ -27,7 +28,6 @@ import ( "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/observability/trace" "github.com/vdaas/vald/internal/strings" - valdsync "github.com/vdaas/vald/internal/sync" ) type Client interface { @@ -40,7 +40,7 @@ type Client interface { type client struct { addrs []string - cl valdsync.Map[string, any] + cl sync.Map c grpc.Client } diff --git a/internal/client/v1/client/filter/ingress/client.go b/internal/client/v1/client/filter/ingress/client.go index 7222e0763d..26e2f8120e 100644 --- 
a/internal/client/v1/client/filter/ingress/client.go +++ b/internal/client/v1/client/filter/ingress/client.go @@ -20,6 +20,7 @@ package ingress import ( "context" "reflect" + "sync" "github.com/vdaas/vald/apis/grpc/v1/filter/ingress" "github.com/vdaas/vald/apis/grpc/v1/payload" @@ -27,7 +28,6 @@ import ( "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/observability/trace" "github.com/vdaas/vald/internal/strings" - valdsync "github.com/vdaas/vald/internal/sync" ) type Client interface { @@ -40,7 +40,7 @@ type Client interface { type client struct { addrs []string - cl valdsync.Map[string, any] + cl sync.Map c grpc.Client } diff --git a/internal/net/dialer.go b/internal/net/dialer.go index 692cc1e058..65ed4e863a 100644 --- a/internal/net/dialer.go +++ b/internal/net/dialer.go @@ -33,7 +33,6 @@ import ( "github.com/vdaas/vald/internal/net/control" "github.com/vdaas/vald/internal/observability/trace" "github.com/vdaas/vald/internal/safety" - valdsync "github.com/vdaas/vald/internal/sync" "github.com/vdaas/vald/internal/tls" ) @@ -60,7 +59,7 @@ type dialer struct { ctrl control.SocketController sockFlg control.SocketFlag dialerDualStack bool - addrs valdsync.Map[string, *addrInfo] + addrs sync.Map der *net.Dialer dialer func(ctx context.Context, network, addr string) (Conn, error) } @@ -255,9 +254,12 @@ func (d *dialer) cachedDialer(ctx context.Context, network, addr string) (conn C isIP: isV4 || isV6, }) } else { - host = ai.host - port = ai.port - isIP = ai.isIP + info, ok := ai.(*addrInfo) + if ok { + host = info.host + port = info.port + isIP = info.isIP + } } if d.enableDNSCache && !isIP { diff --git a/internal/net/grpc/client.go b/internal/net/grpc/client.go index 95dfe07e85..c4fe2ef3a1 100644 --- a/internal/net/grpc/client.go +++ b/internal/net/grpc/client.go @@ -20,6 +20,7 @@ package grpc import ( "context" "math" + "sync" "sync/atomic" "time" @@ -37,7 +38,6 @@ import ( "github.com/vdaas/vald/internal/safety" 
"github.com/vdaas/vald/internal/singleflight" "github.com/vdaas/vald/internal/strings" - valdsync "github.com/vdaas/vald/internal/sync" "google.golang.org/grpc" gbackoff "google.golang.org/grpc/backoff" ) @@ -110,7 +110,7 @@ type gRPCClient struct { gbo gbackoff.Config // grpc's original backoff configuration mcd time.Duration // minimum connection timeout duration group singleflight.Group[pool.Conn] - crl valdsync.Map[string, bool] // connection request list + crl sync.Map // connection request list ech <-chan error monitorRunning atomic.Bool @@ -302,15 +302,18 @@ func (g *gRPCClient) StartConnectionMonitor(ctx context.Context) (<-chan error, } } clctx, cancel := context.WithTimeout(ctx, reconnLimitDuration) - g.crl.Range(func(addr string, enabled bool) bool { + g.crl.Range(func(a, bo interface{}) bool { select { case <-clctx.Done(): return false default: - defer g.crl.Delete(addr) - + defer g.crl.Delete(a) + addr, ok := a.(string) + if !ok { + return true + } var p pool.Conn - if enabled && g.bo != nil { + if enabled, ok := bo.(bool); ok && enabled && g.bo != nil { _, err = g.bo.Do(clctx, func(ictx context.Context) (r interface{}, ret bool, err error) { p, err = g.Connect(ictx, addr) return nil, err != nil, err diff --git a/internal/net/grpc/pool/pool_bench_test.go b/internal/net/grpc/pool/pool_bench_test.go index a8d7515c1f..04efc2cc54 100644 --- a/internal/net/grpc/pool/pool_bench_test.go +++ b/internal/net/grpc/pool/pool_bench_test.go @@ -27,7 +27,6 @@ import ( "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/log/level" "github.com/vdaas/vald/internal/net" - valdsync "github.com/vdaas/vald/internal/sync" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" ) @@ -134,7 +133,7 @@ func Benchmark_StaticDial(b *testing.B) { b.Error(err) } - conns := new(valdsync.Map[string, *grpc.ClientConn]) + conns := new(sync.Map) conns.Store(DefaultServerAddr, conn) b.StopTimer() @@ -144,7 +143,7 @@ func Benchmark_StaticDial(b *testing.B) { 
for i := 0; i < b.N; i++ { val, ok := conns.Load(DefaultServerAddr) if ok { - do(b, val) + do(b, val.(*ClientConn)) } } b.StopTimer() @@ -191,7 +190,7 @@ func BenchmarkParallel_StaticDial(b *testing.B) { b.Error(err) } - conns := new(valdsync.Map[string, *grpc.ClientConn]) + conns := new(sync.Map) conns.Store(DefaultServerAddr, conn) b.StopTimer() @@ -202,7 +201,7 @@ func BenchmarkParallel_StaticDial(b *testing.B) { for pb.Next() { val, ok := conns.Load(DefaultServerAddr) if ok { - do(b, val) + do(b, val.(*ClientConn)) } } }) diff --git a/internal/singleflight/singleflight.go b/internal/singleflight/singleflight.go index 94e72b149c..ac669c089d 100644 --- a/internal/singleflight/singleflight.go +++ b/internal/singleflight/singleflight.go @@ -21,8 +21,6 @@ import ( "context" "sync" "sync/atomic" - - valdsync "github.com/vdaas/vald/internal/sync" ) type call[V any] struct { @@ -38,7 +36,7 @@ type Group[V any] interface { } type group[V any] struct { - m valdsync.Map[string, *call[V]] + m sync.Map } // New returns Group implementation. @@ -51,7 +49,8 @@ func New[V any]() Group[V] { // If duplicate comes, the duplicated call with the same key will wait for the first caller return. // It returns the result and the error of the given function, and whether the result is shared from the first caller. 
func (g *group[V]) Do(_ context.Context, key string, fn func() (V, error)) (v V, shared bool, err error) { - c, loaded := g.m.LoadOrStore(key, new(call[V])) + actual, loaded := g.m.LoadOrStore(key, new(call[V])) + c := actual.(*call[V]) if loaded { atomic.AddUint64(&c.dups, 1) c.wg.Wait() diff --git a/internal/sync/map.go b/internal/sync/map.go deleted file mode 100644 index 25bb07f071..0000000000 --- a/internal/sync/map.go +++ /dev/null @@ -1,7 +0,0 @@ -package sync - -import gache "github.com/kpango/gache/v2" - -type Map[K comparable, V any] struct { - gache.Map[K, V] -} diff --git a/pkg/agent/core/ngt/service/kvs/kvs.go b/pkg/agent/core/ngt/service/kvs/kvs.go index 281a831a44..f471784457 100644 --- a/pkg/agent/core/ngt/service/kvs/kvs.go +++ b/pkg/agent/core/ngt/service/kvs/kvs.go @@ -23,7 +23,6 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/safety" - valdsync "github.com/vdaas/vald/internal/sync" "github.com/zeebo/xxh3" ) @@ -39,21 +38,11 @@ type BidiMap interface { Close() error } -type valueStructOu struct { - value string - timestamp int64 -} - -type ValueStructUo struct { - value uint32 - timestamp int64 -} - type bidi struct { concurrency int l uint64 - ou [slen]*valdsync.Map[uint32, valueStructOu] - uo [slen]*valdsync.Map[string, ValueStructUo] + ou [slen]*ou + uo [slen]*uo eg errgroup.Group } @@ -76,8 +65,8 @@ func New(opts ...Option) BidiMap { opt(b) } for i := range b.ou { - b.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - b.uo[i] = new(valdsync.Map[string, ValueStructUo]) + b.ou[i] = new(ou) + b.uo[i] = new(uo) } if b.eg == nil { @@ -94,43 +83,32 @@ func New(opts ...Option) BidiMap { // Get returns the value and boolean from the given key. // If the value does not exist, it returns nil and false. 
func (b *bidi) Get(key string) (uint32, int64, bool) { - vs, ok := b.uo[xxh3.HashString(key)&mask].Load(key) - if !ok { - return 0, 0, false - } - return vs.value, vs.timestamp, true + return b.uo[xxh3.HashString(key)&mask].Load(key) } // GetInverse returns the key and the boolean from the given val. // If the key does not exist, it returns nil and false. func (b *bidi) GetInverse(val uint32) (string, int64, bool) { - vs, ok := b.ou[val&mask].Load(val) - if !ok { - return "", 0, false - } - - return vs.value, vs.timestamp, true + return b.ou[val&mask].Load(val) } // Set sets the key and val to the bidi. func (b *bidi) Set(key string, val uint32, ts int64) { id := xxh3.HashString(key) & mask - vs, loaded := b.uo[id].LoadOrStore(key, ValueStructUo{value: val, timestamp: ts}) - old := vs.value + old, _, loaded := b.uo[id].LoadOrStore(key, ValueStructUo{value: val, timestamp: ts}) if !loaded { // increase the count only if the key is not exists before atomic.AddUint64(&b.l, 1) } else { b.ou[val&mask].Delete(old) // delete paired map value using old value_key b.uo[id].Store(key, ValueStructUo{value: val, timestamp: ts}) // store if loaded for overwrite new value } - b.ou[val&mask].Store(val, valueStructOu{value: key, timestamp: ts}) // store anytime + b.ou[val&mask].Store(val, ValueStructOu{value: key, timestamp: ts}) // store anytime } // Delete deletes the key and the value from the bidi by the given key and returns val and true. // If the value for the key does not exist, it returns nil and false. func (b *bidi) Delete(key string) (val uint32, ok bool) { - vs, ok := b.uo[xxh3.HashString(key)&mask].LoadAndDelete(key) - val = vs.value + val, _, ok = b.uo[xxh3.HashString(key)&mask].LoadAndDelete(key) if ok { b.ou[val&mask].Delete(val) atomic.AddUint64(&b.l, ^uint64(0)) @@ -141,8 +119,7 @@ func (b *bidi) Delete(key string) (val uint32, ok bool) { // DeleteInverse deletes the key and the value from the bidi by the given val and returns the key and true. 
// If the key for the val does not exist, it returns nil and false. func (b *bidi) DeleteInverse(val uint32) (key string, ok bool) { - vs, ok := b.ou[val&mask].LoadAndDelete(val) - key = vs.value + key, _, ok = b.ou[val&mask].LoadAndDelete(val) if ok { b.uo[xxh3.HashString(key)&mask].Delete(key) atomic.AddUint64(&b.l, ^uint64(0)) diff --git a/pkg/agent/core/ngt/service/kvs/kvs_test.go b/pkg/agent/core/ngt/service/kvs/kvs_test.go index 60d811ac9f..84e21ab258 100644 --- a/pkg/agent/core/ngt/service/kvs/kvs_test.go +++ b/pkg/agent/core/ngt/service/kvs/kvs_test.go @@ -27,7 +27,6 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" - valdsync "github.com/vdaas/vald/internal/sync" "github.com/vdaas/vald/internal/test/goleak" ) @@ -52,12 +51,12 @@ func TestNew(t *testing.T) { tests := []test{ func() test { var ( - wantOu [slen]*valdsync.Map[uint32, valueStructOu] - wantUo [slen]*valdsync.Map[string, ValueStructUo] + wantOu [slen]*ou + wantUo [slen]*uo ) for i := 0; i < slen; i++ { - wantOu[i] = new(valdsync.Map[uint32, valueStructOu]) - wantUo[i] = new(valdsync.Map[string, ValueStructUo]) + wantOu[i] = new(ou) + wantUo[i] = new(uo) } return test{ name: "return the bidi struct", @@ -104,8 +103,8 @@ func Test_bidi_Get(t *testing.T) { key string } type fields struct { - ou [slen]*valdsync.Map[uint32, valueStructOu] - uo [slen]*valdsync.Map[string, ValueStructUo] + ou [slen]*ou + uo [slen]*uo l uint64 } type want struct { @@ -144,8 +143,8 @@ func Test_bidi_Get(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -177,8 +176,8 @@ func Test_bidi_Get(t *testing.T) { l: 100, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) 
} var ( @@ -210,8 +209,8 @@ func Test_bidi_Get(t *testing.T) { l: math.MaxUint64, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -243,8 +242,8 @@ func Test_bidi_Get(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -276,8 +275,8 @@ func Test_bidi_Get(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -307,8 +306,8 @@ func Test_bidi_Get(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -370,8 +369,8 @@ func Test_bidi_GetInverse(t *testing.T) { val uint32 } type fields struct { - ou [slen]*valdsync.Map[uint32, valueStructOu] - uo [slen]*valdsync.Map[string, ValueStructUo] + ou [slen]*ou + uo [slen]*uo l uint64 } type want struct { @@ -410,8 +409,8 @@ func Test_bidi_GetInverse(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -443,8 +442,8 @@ func Test_bidi_GetInverse(t *testing.T) { l: 100, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -476,8 +475,8 @@ func Test_bidi_GetInverse(t *testing.T) { l: math.MaxUint64, } for i := 0; i < slen; i++ { - 
fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -509,8 +508,8 @@ func Test_bidi_GetInverse(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -542,8 +541,8 @@ func Test_bidi_GetInverse(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -573,8 +572,8 @@ func Test_bidi_GetInverse(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -639,8 +638,8 @@ func Test_bidi_Set(t *testing.T) { ts int64 } type fields struct { - ou [slen]*valdsync.Map[uint32, valueStructOu] - uo [slen]*valdsync.Map[string, ValueStructUo] + ou [slen]*ou + uo [slen]*uo l uint64 } type want struct { @@ -687,8 +686,8 @@ func Test_bidi_Set(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -718,8 +717,8 @@ func Test_bidi_Set(t *testing.T) { l: 100, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -749,8 +748,8 @@ func Test_bidi_Set(t *testing.T) { l: math.MaxUint64, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = 
new(ou) + fields.uo[i] = new(uo) } var ( @@ -780,8 +779,8 @@ func Test_bidi_Set(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -820,8 +819,8 @@ func Test_bidi_Set(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } key := "45637ec4-c85f-11ea-87d0" @@ -845,8 +844,8 @@ func Test_bidi_Set(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var val uint32 = 14438 @@ -869,8 +868,8 @@ func Test_bidi_Set(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } return test{ @@ -922,8 +921,8 @@ func Test_bidi_Delete(t *testing.T) { key string } type fields struct { - ou [slen]*valdsync.Map[uint32, valueStructOu] - uo [slen]*valdsync.Map[string, ValueStructUo] + ou [slen]*ou + uo [slen]*uo l uint64 } type want struct { @@ -955,8 +954,8 @@ func Test_bidi_Delete(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -1002,8 +1001,8 @@ func Test_bidi_Delete(t *testing.T) { l: 100, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -1049,8 +1048,8 @@ func Test_bidi_Delete(t *testing.T) { 
l: math.MaxUint64, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -1096,8 +1095,8 @@ func Test_bidi_Delete(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -1140,8 +1139,8 @@ func Test_bidi_Delete(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -1203,8 +1202,8 @@ func Test_bidi_DeleteInverse(t *testing.T) { val uint32 } type fields struct { - ou [slen]*valdsync.Map[uint32, valueStructOu] - uo [slen]*valdsync.Map[string, ValueStructUo] + ou [slen]*ou + uo [slen]*uo l uint64 } type want struct { @@ -1236,8 +1235,8 @@ func Test_bidi_DeleteInverse(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -1283,8 +1282,8 @@ func Test_bidi_DeleteInverse(t *testing.T) { l: 100, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -1330,8 +1329,8 @@ func Test_bidi_DeleteInverse(t *testing.T) { l: math.MaxUint64, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -1377,13 +1376,13 @@ func Test_bidi_DeleteInverse(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = 
new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( - key = "45637ec4-c85f-11ea-87d0" - ts int64 = 24438 + key string = "45637ec4-c85f-11ea-87d0" + ts int64 = 24438 ) return test{ @@ -1421,8 +1420,8 @@ func Test_bidi_DeleteInverse(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -1485,8 +1484,8 @@ func Test_bidi_Range(t *testing.T) { f func(string, uint32, int64) bool } type fields struct { - ou [slen]*valdsync.Map[uint32, valueStructOu] - uo [slen]*valdsync.Map[string, ValueStructUo] + ou [slen]*ou + uo [slen]*uo l uint64 } type want struct { @@ -1511,8 +1510,8 @@ func Test_bidi_Range(t *testing.T) { l: 0, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -1563,8 +1562,8 @@ func Test_bidi_Range(t *testing.T) { l: 100, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -1615,8 +1614,8 @@ func Test_bidi_Range(t *testing.T) { l: math.MaxUint64, } for i := 0; i < slen; i++ { - fields.ou[i] = new(valdsync.Map[uint32, valueStructOu]) - fields.uo[i] = new(valdsync.Map[string, ValueStructUo]) + fields.ou[i] = new(ou) + fields.uo[i] = new(uo) } var ( @@ -1700,8 +1699,8 @@ func Test_bidi_Range(t *testing.T) { func Test_bidi_Len(t *testing.T) { t.Parallel() type fields struct { - ou [slen]*valdsync.Map[uint32, valueStructOu] - uo [slen]*valdsync.Map[string, ValueStructUo] + ou [slen]*ou + uo [slen]*uo l uint64 } type want struct { diff --git a/pkg/agent/core/ngt/service/kvs/ou.go 
// Copyright (C) 2019-2023 vdaas.org vald team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kvs

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

// ou is a concurrency-safe map from uint32 keys to (string value, int64
// timestamp) pairs. Its structure mirrors the standard library's sync.Map:
// an atomically published read-only snapshot ("read") serves lock-free
// lookups, while a mutex-guarded mutable map ("dirty") absorbs writes for
// keys not yet in the snapshot. A miss counter decides when dirty is
// promoted to become the new read snapshot (see missLocked).
type ou struct {
	mu sync.Mutex // guards dirty, misses, and all stores into read

	// read holds the current readOnlyOu snapshot; safe to Load without mu.
	read atomic.Value

	// dirty is the mutable map; nil when read is a complete view.
	dirty map[uint32]*entryOu

	// misses counts lookups that fell through to dirty under mu; once it
	// reaches len(dirty), dirty is promoted to read and reset.
	misses int
}

// readOnlyOu is the immutable snapshot stored in ou.read.
// amended reports that dirty contains keys absent from m.
type readOnlyOu struct {
	m       map[uint32]*entryOu
	amended bool
}

// expungedOu is a sentinel pointer marking an entry that was deleted from
// read and intentionally omitted from dirty during a dirty-map rebuild.
// skipcq: GSC-G103
var expungedOu = unsafe.Pointer(new(ValueStructOu))

// entryOu is one map slot. p holds a *ValueStructOu, nil (deleted), or
// expungedOu (deleted and not tracked by dirty).
type entryOu struct {
	p unsafe.Pointer
}

// ValueStructOu is the stored value pair for the ou map.
type ValueStructOu struct {
	value     string
	timestamp int64
}

// newEntryOu wraps a value in a freshly allocated entry.
func newEntryOu(i ValueStructOu) *entryOu {
	// skipcq: GSC-G103
	return &entryOu{p: unsafe.Pointer(&i)}
}

// Load returns the value and timestamp stored for key, with ok reporting
// whether the key was present. The fast path reads only the lock-free
// snapshot; on a possible miss it re-checks under mu (double-checked
// locking) and consults dirty, recording a miss.
func (m *ou) Load(key uint32) (value string, timestamp int64, ok bool) {
	read, _ := m.read.Load().(readOnlyOu)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Re-check read: it may have been promoted while we were blocked.
		read, _ = m.read.Load().(readOnlyOu)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Count the miss regardless of hit so promotion still triggers.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return value, timestamp, false
	}
	return e.load()
}

// load atomically reads the entry, reporting false for deleted slots.
func (e *entryOu) load() (value string, timestamp int64, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expungedOu {
		return value, timestamp, false
	}
	return (*ValueStructOu)(p).value, (*ValueStructOu)(p).timestamp, true
}

// Store sets the value for key, overwriting any previous value.
// The fast path CASes an existing read entry; otherwise it falls back to
// mu and writes through dirty, amending the snapshot if needed.
func (m *ou) Store(key uint32, value ValueStructOu) {
	read, _ := m.read.Load().(readOnlyOu)
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}
	m.mu.Lock()
	read, _ = m.read.Load().(readOnlyOu)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// Entry was expunged: dirty lacks it, so link it back in.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// First new key since promotion: build dirty and mark read amended.
			m.dirtyLocked()
			m.read.Store(readOnlyOu{m: read.m, amended: true})
		}
		m.dirty[key] = newEntryOu(value)
	}
	m.mu.Unlock()
}

// tryStore CAS-loops the new value into the entry; it fails (returns false)
// only if the entry is expunged, which requires the locked slow path.
func (e *entryOu) tryStore(i *ValueStructOu) bool {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == expungedOu {
			return false
		}
		// skipcq: GSC-G103
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
	}
}

// unexpungeLocked flips an expunged entry back to nil; the caller must hold
// mu and must re-insert the entry into dirty when this returns true.
func (e *entryOu) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expungedOu, nil)
}

// storeLocked unconditionally publishes the value; caller must hold mu.
func (e *entryOu) storeLocked(i *ValueStructOu) {
	// skipcq: GSC-G103
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for key if present; otherwise it
// stores value and returns it. loaded reports whether the value was loaded
// rather than stored.
func (m *ou) LoadOrStore(key uint32, value ValueStructOu) (actual string, at int64, loaded bool) {
	read, _ := m.read.Load().(readOnlyOu)
	if e, ok := read.m[key]; ok {
		actual, at, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, at, loaded
		}
	}
	m.mu.Lock()
	read, _ = m.read.Load().(readOnlyOu)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, at, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, at, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			m.dirtyLocked()
			m.read.Store(readOnlyOu{m: read.m, amended: true})
		}
		m.dirty[key] = newEntryOu(value)
		actual, at, loaded = value.value, value.timestamp, false
	}
	m.mu.Unlock()
	return actual, at, loaded
}

// tryLoadOrStore atomically loads the entry or, if it is nil, stores i.
// ok is false only for an expunged entry, which must be handled under mu.
func (e *entryOu) tryLoadOrStore(i ValueStructOu) (actual string, at int64, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expungedOu {
		return actual, at, false, false
	}
	if p != nil {
		return (*ValueStructOu)(p).value, (*ValueStructOu)(p).timestamp, true, true
	}
	// Copy i so the stored pointer never aliases the caller's argument.
	ic := i
	for {
		// skipcq: GSC-G103
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i.value, i.timestamp, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expungedOu {
			return actual, at, false, false
		}
		if p != nil {
			return (*ValueStructOu)(p).value, (*ValueStructOu)(p).timestamp, true, true
		}
	}
}

// LoadAndDelete deletes the value for key, returning the previous value
// and whether the key was present.
func (m *ou) LoadAndDelete(key uint32) (value string, timestamp int64, loaded bool) {
	read, _ := m.read.Load().(readOnlyOu)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnlyOu)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			delete(m.dirty, key)
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if ok {
		return e.delete()
	}
	return value, timestamp, false
}

// Delete removes the value for key, discarding the previous value.
func (m *ou) Delete(key uint32) {
	m.LoadAndDelete(key)
}

// delete CAS-loops the entry to nil, returning the value it held, if any.
func (e *entryOu) delete() (value string, timestamp int64, ok bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expungedOu {
			return value, timestamp, false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return (*ValueStructOu)(p).value, (*ValueStructOu)(p).timestamp, true
		}
	}
}

// Range calls f sequentially for each key/value present in the map,
// stopping early if f returns false. If the snapshot is amended, Range
// first promotes dirty so a single map covers all keys; deleted entries
// are skipped.
func (m *ou) Range(f func(key uint32, value ValueStructOu) bool) {
	read, _ := m.read.Load().(readOnlyOu)
	if read.amended {

		m.mu.Lock()
		read, _ = m.read.Load().(readOnlyOu)
		if read.amended {
			read = readOnlyOu{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, t, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, ValueStructOu{value: v, timestamp: t}) {
			break
		}
	}
}

// missLocked records a lookup that needed mu; once misses reaches the size
// of dirty, dirty is promoted to be the new read snapshot. Caller holds mu.
func (m *ou) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnlyOu{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

// dirtyLocked lazily rebuilds dirty from the current read snapshot,
// expunging deleted entries so they need not be copied. Caller holds mu.
func (m *ou) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnlyOu)
	m.dirty = make(map[uint32]*entryOu, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

// tryExpungeLocked marks a nil (deleted) entry as expunged so the dirty
// rebuild can skip it; returns whether the entry ends up expunged.
func (e *entryOu) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expungedOu) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expungedOu
}
+package kvs + +// NOT IMPLEMENTED BELOW +// +// func Test_ou_Load(t *testing.T) { +// type args struct { +// key uint32 +// } +// type fields struct { +// read atomic.Value +// dirty map[uint32]*entryOu +// misses int +// } +// type want struct { +// wantValue string +// wantTimestamp int64 +// wantOk bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, string, int64, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotValue string, gotTimestamp int64, gotOk bool) error { +// if !reflect.DeepEqual(gotValue, w.wantValue) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotValue, w.wantValue) +// } +// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:0, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:0, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// 
tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &ou{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotValue, gotTimestamp, gotOk := m.Load(test.args.key) +// if err := checkFunc(test.want, gotValue, gotTimestamp, gotOk); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_ou_Store(t *testing.T) { +// type args struct { +// key uint32 +// value ValueStructOu +// } +// type fields struct { +// read atomic.Value +// dirty map[uint32]*entryOu +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:0, +// value:ValueStructOu{}, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:0, +// value:ValueStructOu{}, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ 
+// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &ou{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Store(test.args.key, test.args.value) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_ou_LoadOrStore(t *testing.T) { +// type args struct { +// key uint32 +// value ValueStructOu +// } +// type fields struct { +// read atomic.Value +// dirty map[uint32]*entryOu +// misses int +// } +// type want struct { +// wantActual string +// wantAt int64 +// wantLoaded bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, string, int64, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotActual string, gotAt int64, gotLoaded bool) error { +// if !reflect.DeepEqual(gotActual, w.wantActual) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotActual, w.wantActual) +// } +// if !reflect.DeepEqual(gotAt, w.wantAt) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotAt, w.wantAt) +// } +// if !reflect.DeepEqual(gotLoaded, w.wantLoaded) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoaded, w.wantLoaded) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:0, +// value:ValueStructOu{}, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// 
checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:0, +// value:ValueStructOu{}, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &ou{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotActual, gotAt, gotLoaded := m.LoadOrStore(test.args.key, test.args.value) +// if err := checkFunc(test.want, gotActual, gotAt, gotLoaded); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_ou_LoadAndDelete(t *testing.T) { +// type args struct { +// key uint32 +// } +// type fields struct { +// read atomic.Value +// dirty map[uint32]*entryOu +// misses int +// } +// type want struct { +// wantValue string +// wantTimestamp int64 +// wantLoaded bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, string, int64, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotValue string, gotTimestamp int64, gotLoaded bool) error { +// if 
!reflect.DeepEqual(gotValue, w.wantValue) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotValue, w.wantValue) +// } +// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) +// } +// if !reflect.DeepEqual(gotLoaded, w.wantLoaded) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoaded, w.wantLoaded) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:0, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:0, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &ou{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotValue, gotTimestamp, gotLoaded := m.LoadAndDelete(test.args.key) +// if err := checkFunc(test.want, gotValue, gotTimestamp, gotLoaded); err != nil { +// tt.Errorf("error = %v", 
err) +// } +// +// }) +// } +// } +// +// func Test_ou_Delete(t *testing.T) { +// type args struct { +// key uint32 +// } +// type fields struct { +// read atomic.Value +// dirty map[uint32]*entryOu +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:0, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:0, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &ou{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Delete(test.args.key) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// 
func Test_ou_Range(t *testing.T) { +// type args struct { +// f func(key uint32, value ValueStructOu) bool +// } +// type fields struct { +// read atomic.Value +// dirty map[uint32]*entryOu +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &ou{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Range(test.args.f) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git 
// Copyright (C) 2019-2023 vdaas.org vald team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kvs

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

// uo is a concurrency-safe map from string keys to (uint32 value, int64
// timestamp) pairs — the mirror of ou with key/value types swapped. Its
// structure mirrors the standard library's sync.Map: an atomically
// published read-only snapshot ("read") serves lock-free lookups, while a
// mutex-guarded mutable map ("dirty") absorbs writes for keys not yet in
// the snapshot. A miss counter decides when dirty is promoted (missLocked).
type uo struct {
	mu sync.Mutex // guards dirty, misses, and all stores into read

	// read holds the current readOnlyUo snapshot; safe to Load without mu.
	read atomic.Value

	// dirty is the mutable map; nil when read is a complete view.
	dirty map[string]*entryUo

	// misses counts lookups that fell through to dirty under mu; once it
	// reaches len(dirty), dirty is promoted to read and reset.
	misses int
}

// readOnlyUo is the immutable snapshot stored in uo.read.
// amended reports that dirty contains keys absent from m.
type readOnlyUo struct {
	m       map[string]*entryUo
	amended bool
}

// expungedUo is a sentinel pointer marking an entry that was deleted from
// read and intentionally omitted from dirty during a dirty-map rebuild.
// skipcq: GSC-G103
var expungedUo = unsafe.Pointer(new(ValueStructUo))

// entryUo is one map slot. p holds a *ValueStructUo, nil (deleted), or
// expungedUo (deleted and not tracked by dirty).
type entryUo struct {
	p unsafe.Pointer
}

// ValueStructUo is the stored value pair for the uo map.
type ValueStructUo struct {
	value     uint32
	timestamp int64
}

// newEntryUo wraps a value in a freshly allocated entry.
func newEntryUo(i ValueStructUo) *entryUo {
	// skipcq: GSC-G103
	return &entryUo{p: unsafe.Pointer(&i)}
}

// Load returns the value and timestamp stored for key, with ok reporting
// whether the key was present. The fast path reads only the lock-free
// snapshot; on a possible miss it re-checks under mu (double-checked
// locking) and consults dirty, recording a miss.
func (m *uo) Load(key string) (value uint32, timestamp int64, ok bool) {
	read, _ := m.read.Load().(readOnlyUo)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Re-check read: it may have been promoted while we were blocked.
		read, _ = m.read.Load().(readOnlyUo)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Count the miss regardless of hit so promotion still triggers.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return value, timestamp, false
	}
	return e.load()
}

// load atomically reads the entry, reporting false for deleted slots.
func (e *entryUo) load() (value uint32, timestamp int64, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expungedUo {
		return value, timestamp, false
	}
	return (*ValueStructUo)(p).value, (*ValueStructUo)(p).timestamp, true
}

// Store sets the value for key, overwriting any previous value.
// The fast path CASes an existing read entry; otherwise it falls back to
// mu and writes through dirty, amending the snapshot if needed.
func (m *uo) Store(key string, value ValueStructUo) {
	read, _ := m.read.Load().(readOnlyUo)
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}
	m.mu.Lock()
	read, _ = m.read.Load().(readOnlyUo)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// Entry was expunged: dirty lacks it, so link it back in.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// First new key since promotion: build dirty and mark read amended.
			m.dirtyLocked()
			m.read.Store(readOnlyUo{m: read.m, amended: true})
		}
		m.dirty[key] = newEntryUo(value)
	}
	m.mu.Unlock()
}

// tryStore CAS-loops the new value into the entry; it fails (returns false)
// only if the entry is expunged, which requires the locked slow path.
func (e *entryUo) tryStore(i *ValueStructUo) bool {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == expungedUo {
			return false
		}
		// skipcq: GSC-G103
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
	}
}

// unexpungeLocked flips an expunged entry back to nil; the caller must hold
// mu and must re-insert the entry into dirty when this returns true.
func (e *entryUo) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expungedUo, nil)
}

// storeLocked unconditionally publishes the value; caller must hold mu.
func (e *entryUo) storeLocked(i *ValueStructUo) {
	// skipcq: GSC-G103
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for key if present; otherwise it
// stores value and returns it. loaded reports whether the value was loaded
// rather than stored.
func (m *uo) LoadOrStore(key string, value ValueStructUo) (actual uint32, at int64, loaded bool) {
	read, _ := m.read.Load().(readOnlyUo)
	if e, ok := read.m[key]; ok {
		actual, at, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, at, loaded
		}
	}
	m.mu.Lock()
	read, _ = m.read.Load().(readOnlyUo)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, at, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, at, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			m.dirtyLocked()
			m.read.Store(readOnlyUo{m: read.m, amended: true})
		}
		m.dirty[key] = newEntryUo(value)
		actual, at, loaded = value.value, value.timestamp, false
	}
	m.mu.Unlock()
	return actual, at, loaded
}

// tryLoadOrStore atomically loads the entry or, if it is nil, stores i.
// ok is false only for an expunged entry, which must be handled under mu.
func (e *entryUo) tryLoadOrStore(i ValueStructUo) (actual uint32, at int64, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expungedUo {
		return actual, at, false, false
	}
	if p != nil {
		return (*ValueStructUo)(p).value, (*ValueStructUo)(p).timestamp, true, true
	}
	// Copy i so the stored pointer never aliases the caller's argument.
	ic := i
	for {
		// skipcq: GSC-G103
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i.value, i.timestamp, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expungedUo {
			return actual, at, false, false
		}
		if p != nil {
			return (*ValueStructUo)(p).value, (*ValueStructUo)(p).timestamp, true, true
		}
	}
}

// LoadAndDelete deletes the value for key, returning the previous value
// and whether the key was present.
func (m *uo) LoadAndDelete(key string) (value uint32, timestamp int64, loaded bool) {
	read, _ := m.read.Load().(readOnlyUo)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnlyUo)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			delete(m.dirty, key)
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if ok {
		return e.delete()
	}
	return value, timestamp, false
}

// Delete removes the value for key, discarding the previous value.
func (m *uo) Delete(key string) {
	m.LoadAndDelete(key)
}

// delete CAS-loops the entry to nil, returning the value it held, if any.
func (e *entryUo) delete() (value uint32, timestamp int64, ok bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expungedUo {
			return value, timestamp, false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return (*ValueStructUo)(p).value, (*ValueStructUo)(p).timestamp, true
		}
	}
}

// Range calls f sequentially for each key/value present in the map,
// stopping early if f returns false. If the snapshot is amended, Range
// first promotes dirty so a single map covers all keys; deleted entries
// are skipped.
func (m *uo) Range(f func(key string, value ValueStructUo) bool) {
	read, _ := m.read.Load().(readOnlyUo)
	if read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnlyUo)
		if read.amended {
			read = readOnlyUo{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, t, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, ValueStructUo{value: v, timestamp: t}) {
			break
		}
	}
}

// missLocked records a lookup that needed mu; once misses reaches the size
// of dirty, dirty is promoted to be the new read snapshot. Caller holds mu.
func (m *uo) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnlyUo{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

// dirtyLocked lazily rebuilds dirty from the current read snapshot,
// expunging deleted entries so they need not be copied. Caller holds mu.
func (m *uo) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnlyUo)
	m.dirty = make(map[string]*entryUo, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

// tryExpungeLocked marks a nil (deleted) entry as expunged so the dirty
// rebuild can skip it; returns whether the entry ends up expunged.
func (e *entryUo) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expungedUo) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expungedUo
}
+package kvs + +// NOT IMPLEMENTED BELOW +// +// func Test_uo_Load(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryUo +// misses int +// } +// type want struct { +// wantValue uint32 +// wantTimestamp int64 +// wantOk bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, uint32, int64, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotValue uint32, gotTimestamp int64, gotOk bool) error { +// if !reflect.DeepEqual(gotValue, w.wantValue) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotValue, w.wantValue) +// } +// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// 
tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &uo{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotValue, gotTimestamp, gotOk := m.Load(test.args.key) +// if err := checkFunc(test.want, gotValue, gotTimestamp, gotOk); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_uo_Store(t *testing.T) { +// type args struct { +// key string +// value ValueStructUo +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryUo +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// value:ValueStructUo{}, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// value:ValueStructUo{}, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ 
+// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &uo{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Store(test.args.key, test.args.value) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_uo_LoadOrStore(t *testing.T) { +// type args struct { +// key string +// value ValueStructUo +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryUo +// misses int +// } +// type want struct { +// wantActual uint32 +// wantAt int64 +// wantLoaded bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, uint32, int64, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotActual uint32, gotAt int64, gotLoaded bool) error { +// if !reflect.DeepEqual(gotActual, w.wantActual) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotActual, w.wantActual) +// } +// if !reflect.DeepEqual(gotAt, w.wantAt) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotAt, w.wantAt) +// } +// if !reflect.DeepEqual(gotLoaded, w.wantLoaded) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoaded, w.wantLoaded) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// value:ValueStructUo{}, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// 
checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// value:ValueStructUo{}, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &uo{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotActual, gotAt, gotLoaded := m.LoadOrStore(test.args.key, test.args.value) +// if err := checkFunc(test.want, gotActual, gotAt, gotLoaded); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_uo_LoadAndDelete(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryUo +// misses int +// } +// type want struct { +// wantValue uint32 +// wantTimestamp int64 +// wantLoaded bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, uint32, int64, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotValue uint32, gotTimestamp int64, gotLoaded bool) error { +// if 
!reflect.DeepEqual(gotValue, w.wantValue) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotValue, w.wantValue) +// } +// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) +// } +// if !reflect.DeepEqual(gotLoaded, w.wantLoaded) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoaded, w.wantLoaded) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &uo{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotValue, gotTimestamp, gotLoaded := m.LoadAndDelete(test.args.key) +// if err := checkFunc(test.want, gotValue, gotTimestamp, gotLoaded); err != nil { +// tt.Errorf("error = %v", 
err) +// } +// +// }) +// } +// } +// +// func Test_uo_Delete(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryUo +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &uo{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Delete(test.args.key) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// 
+// func Test_uo_Range(t *testing.T) { +// type args struct { +// f func(key string, value ValueStructUo) bool +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryUo +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &uo{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Range(test.args.f) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git 
a/pkg/agent/core/ngt/service/vqueue/indexmap.go b/pkg/agent/core/ngt/service/vqueue/indexmap.go new file mode 100644 index 0000000000..1fc9f431ad --- /dev/null +++ b/pkg/agent/core/ngt/service/vqueue/indexmap.go @@ -0,0 +1,279 @@ +// Copyright (C) 2019-2023 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package vqueue + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +type indexMap struct { + mu sync.Mutex + read atomic.Value + dirty map[string]*entryIndexMap + misses int +} + +type readOnlyIndexMap struct { + m map[string]*entryIndexMap + amended bool +} + +// skipcq: GSC-G103 +var expungedIndexMap = unsafe.Pointer(new(index)) + +type entryIndexMap struct { + // skipcq: GSC-G103 + p unsafe.Pointer +} + +func newEntryIndexMap(i index) *entryIndexMap { + // skipcq: GSC-G103 + return &entryIndexMap{p: unsafe.Pointer(&i)} +} + +func (m *indexMap) Load(key string) (value index, ok bool) { + read, _ := m.read.Load().(readOnlyIndexMap) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + + read, _ = m.read.Load().(readOnlyIndexMap) + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + + m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return value, false + } + return e.load() +} + +func (e *entryIndexMap) load() (value index, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expungedIndexMap { + return value, false + } + return *(*index)(p), true +} + +func (m *indexMap) Store(key 
string, value index) { + read, _ := m.read.Load().(readOnlyIndexMap) + if e, ok := read.m[key]; ok && e.tryStore(&value) { + return + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnlyIndexMap) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + e.storeLocked(&value) + } else if e, ok := m.dirty[key]; ok { + e.storeLocked(&value) + } else { + if !read.amended { + + m.dirtyLocked() + m.read.Store(readOnlyIndexMap{m: read.m, amended: true}) + } + m.dirty[key] = newEntryIndexMap(value) + } + m.mu.Unlock() +} + +func (e *entryIndexMap) tryStore(i *index) bool { + for { + p := atomic.LoadPointer(&e.p) + if p == expungedIndexMap { + return false + } + // skipcq: GSC-G103 + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return true + } + } +} + +func (e *entryIndexMap) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, expungedIndexMap, nil) +} + +func (e *entryIndexMap) storeLocked(i *index) { + // skipcq: GSC-G103 + atomic.StorePointer(&e.p, unsafe.Pointer(i)) +} + +func (m *indexMap) LoadOrStore(key string, value index) (actual index, loaded bool) { + read, _ := m.read.Load().(readOnlyIndexMap) + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnlyIndexMap) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + + m.dirtyLocked() + m.read.Store(readOnlyIndexMap{m: read.m, amended: true}) + } + m.dirty[key] = newEntryIndexMap(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +func (e *entryIndexMap) tryLoadOrStore(i index) (actual index, loaded, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == expungedIndexMap { + return 
actual, false, false + } + if p != nil { + return *(*index)(p), true, true + } + + ic := i + for { + + // skipcq: GSC-G103 + if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { + return i, false, true + } + p = atomic.LoadPointer(&e.p) + if p == expungedIndexMap { + return actual, false, false + } + if p != nil { + return *(*index)(p), true, true + } + } +} + +func (m *indexMap) LoadAndDelete(key string) (value index, loaded bool) { + read, _ := m.read.Load().(readOnlyIndexMap) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read, _ = m.read.Load().(readOnlyIndexMap) + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + delete(m.dirty, key) + + m.missLocked() + } + m.mu.Unlock() + } + if ok { + return e.delete() + } + return value, false +} + +func (m *indexMap) Delete(key string) { + m.LoadAndDelete(key) +} + +func (e *entryIndexMap) delete() (value index, ok bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expungedIndexMap { + return value, false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return *(*index)(p), true + } + } +} + +func (m *indexMap) Range(f func(key string, value index) bool) { + read, _ := m.read.Load().(readOnlyIndexMap) + if read.amended { + + m.mu.Lock() + read, _ = m.read.Load().(readOnlyIndexMap) + if read.amended { + read = readOnlyIndexMap{m: m.dirty} + m.read.Store(read) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *indexMap) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(readOnlyIndexMap{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *indexMap) dirtyLocked() { + if m.dirty != nil { + return + } + + read, _ := m.read.Load().(readOnlyIndexMap) + m.dirty = make(map[string]*entryIndexMap, len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } 
+ } +} + +func (e *entryIndexMap) tryExpungeLocked() (isExpunged bool) { + p := atomic.LoadPointer(&e.p) + for p == nil { + if atomic.CompareAndSwapPointer(&e.p, nil, expungedIndexMap) { + return true + } + p = atomic.LoadPointer(&e.p) + } + return p == expungedIndexMap +} diff --git a/pkg/agent/core/ngt/service/vqueue/indexmap_test.go b/pkg/agent/core/ngt/service/vqueue/indexmap_test.go new file mode 100644 index 0000000000..1cba535f35 --- /dev/null +++ b/pkg/agent/core/ngt/service/vqueue/indexmap_test.go @@ -0,0 +1,664 @@ +// +// Copyright (C) 2019-2023 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package vqueue + +// NOT IMPLEMENTED BELOW +// +// func Test_indexMap_Load(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryIndexMap +// misses int +// } +// type want struct { +// wantValue index +// wantOk bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, index, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotValue index, gotOk bool) error { +// if !reflect.DeepEqual(gotValue, w.wantValue) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotValue, w.wantValue) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer 
test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &indexMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotValue, gotOk := m.Load(test.args.key) +// if err := checkFunc(test.want, gotValue, gotOk); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_indexMap_Store(t *testing.T) { +// type args struct { +// key string +// value index +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryIndexMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// value:index{}, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// value:index{}, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// 
test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &indexMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Store(test.args.key, test.args.value) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_indexMap_LoadOrStore(t *testing.T) { +// type args struct { +// key string +// value index +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryIndexMap +// misses int +// } +// type want struct { +// wantActual index +// wantLoaded bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, index, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotActual index, gotLoaded bool) error { +// if !reflect.DeepEqual(gotActual, w.wantActual) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotActual, w.wantActual) +// } +// if !reflect.DeepEqual(gotLoaded, w.wantLoaded) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoaded, w.wantLoaded) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// value:index{}, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// value:index{}, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// 
misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &indexMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotActual, gotLoaded := m.LoadOrStore(test.args.key, test.args.value) +// if err := checkFunc(test.want, gotActual, gotLoaded); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_indexMap_LoadAndDelete(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryIndexMap +// misses int +// } +// type want struct { +// wantValue index +// wantLoaded bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, index, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotValue index, gotLoaded bool) error { +// if !reflect.DeepEqual(gotValue, w.wantValue) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotValue, w.wantValue) +// } +// if !reflect.DeepEqual(gotLoaded, w.wantLoaded) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoaded, w.wantLoaded) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// }, +// fields: fields { +// 
read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &indexMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotValue, gotLoaded := m.LoadAndDelete(test.args.key) +// if err := checkFunc(test.want, gotValue, gotLoaded); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_indexMap_Delete(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryIndexMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", 
+// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &indexMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Delete(test.args.key) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_indexMap_Range(t *testing.T) { +// type args struct { +// f func(key string, value index) bool +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryIndexMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// 
f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &indexMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Range(test.args.f) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/pkg/agent/core/ngt/service/vqueue/queue.go b/pkg/agent/core/ngt/service/vqueue/queue.go index bc724529a4..b8b2202256 100644 --- a/pkg/agent/core/ngt/service/vqueue/queue.go +++ b/pkg/agent/core/ngt/service/vqueue/queue.go @@ -26,7 +26,6 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/slices" - valdsync "github.com/vdaas/vald/internal/sync" ) // Queue represents vector queue cache interface @@ -43,7 +42,7 @@ type Queue interface { } type vqueue struct { - il, dl valdsync.Map[string, index] + il, dl indexMap ic, dc uint64 } diff 
--git a/pkg/discoverer/k8s/service/discover.go b/pkg/discoverer/k8s/service/discover.go index c98931ee6f..dc2e15ff00 100644 --- a/pkg/discoverer/k8s/service/discover.go +++ b/pkg/discoverer/k8s/service/discover.go @@ -37,7 +37,6 @@ import ( "github.com/vdaas/vald/internal/net" "github.com/vdaas/vald/internal/safety" "github.com/vdaas/vald/internal/slices" - valdsync "github.com/vdaas/vald/internal/sync" ) type Discoverer interface { @@ -48,10 +47,10 @@ type Discoverer interface { type discoverer struct { maxPods int - nodes valdsync.Map[string, *node.Node] - nodeMetrics valdsync.Map[string, mnode.Node] - pods valdsync.Map[string, []pod.Pod] - podMetrics valdsync.Map[string, mpod.Pod] + nodes nodeMap + nodeMetrics nodeMetricsMap + pods podsMap + podMetrics podMetricsMap podsByNode atomic.Value podsByNamespace atomic.Value podsByName atomic.Value diff --git a/pkg/discoverer/k8s/service/nodemap.go b/pkg/discoverer/k8s/service/nodemap.go new file mode 100644 index 0000000000..32ad7aa0b8 --- /dev/null +++ b/pkg/discoverer/k8s/service/nodemap.go @@ -0,0 +1,404 @@ +// +// Copyright (C) 2019-2023 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package service + +import ( + "sync" + "sync/atomic" + "unsafe" + + "github.com/vdaas/vald/internal/k8s/node" +) + +// Map is like a Go map[interface{}]interface{} but is safe for concurrent use +// by multiple goroutines without additional locking or coordination. 
+// Loads, stores, and deletes run in amortized constant time. +// +// The Map type is specialized. Most code should use a plain Go map instead, +// with separate locking or coordination, for better type safety and to make it +// easier to maintain other invariants along with the map content. +// +// The Map type is optimized for two common use cases: (1) when the entry for a given +// key is only ever written once but read many times, as in caches that only grow, +// or (2) when multiple goroutines read, write, and overwrite entries for disjoint +// sets of keys. In these two cases, use of a Map may significantly reduce lock +// contention compared to a Go map paired with a separate Mutex or RWMutex. +// +// The zero Map is empty and ready for use. A Map must not be copied after first use. +type nodeMap struct { + mu sync.Mutex + + // read contains the portion of the map's contents that are safe for + // concurrent access (with or without mu held). + // + // The read field itself is always safe to load, but must only be stored with + // mu held. + // + // Entries stored in read may be updated concurrently without mu, but updating + // a previously-expunged entry requires that the entry be copied to the dirty + // map and unexpunged with mu held. + read atomic.Value // readOnly + + // dirty contains the portion of the map's contents that require mu to be + // held. To ensure that the dirty map can be promoted to the read map quickly, + // it also includes all of the non-expunged entries in the read map. + // + // Expunged entries are not stored in the dirty map. An expunged entry in the + // clean map must be unexpunged and added to the dirty map before a new value + // can be stored to it. + // + // If the dirty map is nil, the next write to the map will initialize it by + // making a shallow copy of the clean map, omitting stale entries. 
+ dirty map[string]*entryNodeMap + + // misses counts the number of loads since the read map was last updated that + // needed to lock mu to determine whether the key was present. + // + // Once enough misses have occurred to cover the cost of copying the dirty + // map, the dirty map will be promoted to the read map (in the unamended + // state) and the next store to the map will make a new dirty copy. + misses int +} + +// readOnly is an immutable struct stored atomically in the Map.read field. +type readOnlyNodeMap struct { + m map[string]*entryNodeMap + amended bool // true if the dirty map contains some key not in m. +} + +// expunged is an arbitrary pointer that marks entries which have been deleted +// from the dirty map. +// skipcq: GSC-G103 +var expungedNodeMap = unsafe.Pointer(new(*node.Node)) + +// An entry is a slot in the map corresponding to a particular key. +type entryNodeMap struct { + // p points to the interface{} value stored for the entry. + // + // If p == nil, the entry has been deleted and m.dirty == nil. + // + // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry + // is missing from m.dirty. + // + // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty + // != nil, in m.dirty[key]. + // + // An entry can be deleted by atomic replacement with nil: when m.dirty is + // next created, it will atomically replace nil with expunged and leave + // m.dirty[key] unset. + // + // An entry's associated value can be updated by atomic replacement, provided + // p != expunged. If p == expunged, an entry's associated value can be updated + // only after first setting m.dirty[key] = e so that lookups using the dirty + // map find the entry. + p unsafe.Pointer // *interface{} +} + +func newEntryNodeMap(i *node.Node) *entryNodeMap { + // skipcq: GSC-G103 + return &entryNodeMap{p: unsafe.Pointer(&i)} +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. 
+// The ok result indicates whether value was found in the map. +func (m *nodeMap) Load(key string) (value *node.Node, ok bool) { + read, _ := m.read.Load().(readOnlyNodeMap) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + // Avoid reporting a spurious miss if m.dirty got promoted while we were + // blocked on m.mu. (If further loads of the same key will not miss, it's + // not worth copying the dirty map for this key.) + read, _ = m.read.Load().(readOnlyNodeMap) + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return value, false + } + return e.load() +} + +func (e *entryNodeMap) load() (value *node.Node, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expungedNodeMap { + return value, false + } + return *(**node.Node)(p), true +} + +// Store sets the value for a key. +func (m *nodeMap) Store(key string, value *node.Node) { + read, _ := m.read.Load().(readOnlyNodeMap) + if e, ok := read.m[key]; ok && e.tryStore(&value) { + return + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnlyNodeMap) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + // The entry was previously expunged, which implies that there is a + // non-nil dirty map and this entry is not in it. + m.dirty[key] = e + } + e.storeLocked(&value) + } else if e, ok := m.dirty[key]; ok { + e.storeLocked(&value) + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnlyNodeMap{m: read.m, amended: true}) + } + m.dirty[key] = newEntryNodeMap(value) + } + m.mu.Unlock() +} + +// tryStore stores a value if the entry has not been expunged. 
+// +// If the entry is expunged, tryStore returns false and leaves the entry +// unchanged. +func (e *entryNodeMap) tryStore(i **node.Node) bool { + for { + p := atomic.LoadPointer(&e.p) + if p == expungedNodeMap { + return false + } + // skipcq: GSC-G103 + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return true + } + } +} + +// unexpungeLocked ensures that the entry is not marked as expunged. +// +// If the entry was previously expunged, it must be added to the dirty map +// before m.mu is unlocked. +func (e *entryNodeMap) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, expungedNodeMap, nil) +} + +// storeLocked unconditionally stores a value to the entry. +// +// The entry must be known not to be expunged. +func (e *entryNodeMap) storeLocked(i **node.Node) { + // skipcq: GSC-G103 + atomic.StorePointer(&e.p, unsafe.Pointer(i)) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *nodeMap) LoadOrStore(key string, value *node.Node) (actual *node.Node, loaded bool) { + // Avoid locking if it's a clean hit. + read, _ := m.read.Load().(readOnlyNodeMap) + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnlyNodeMap) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. 
+ m.dirtyLocked() + m.read.Store(readOnlyNodeMap{m: read.m, amended: true}) + } + m.dirty[key] = newEntryNodeMap(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +// tryLoadOrStore atomically loads or stores a value if the entry is not +// expunged. +// +// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and +// returns with ok==false. +func (e *entryNodeMap) tryLoadOrStore(i *node.Node) (actual *node.Node, loaded, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == expungedNodeMap { + return actual, false, false + } + if p != nil { + return *(**node.Node)(p), true, true + } + + // Copy the interface after the first load to make this method more amenable + // to escape analysis: if we hit the "load" path or the entry is expunged, we + // shouldn't bother heap-allocating. + ic := i + for { + // skipcq: GSC-G103 + if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { + return i, false, true + } + p = atomic.LoadPointer(&e.p) + if p == expungedNodeMap { + return actual, false, false + } + if p != nil { + return *(**node.Node)(p), true, true + } + } +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. +func (m *nodeMap) LoadAndDelete(key string) (value *node.Node, loaded bool) { + read, _ := m.read.Load().(readOnlyNodeMap) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read, _ = m.read.Load().(readOnlyNodeMap) + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + delete(m.dirty, key) + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if ok { + return e.delete() + } + return value, false +} + +// Delete deletes the value for a key. 
+func (m *nodeMap) Delete(key string) { + m.LoadAndDelete(key) +} + +func (e *entryNodeMap) delete() (value *node.Node, ok bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expungedNodeMap { + return value, false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return *(**node.Node)(p), true + } + } +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *nodeMap) Range(f func(key string, value *node.Node) bool) { + // We need to be able to iterate over all of the keys that were already + // present at the start of the call to Range. + // If read.amended is false, then read.m satisfies that property without + // requiring us to hold m.mu for a long time. + read, _ := m.read.Load().(readOnlyNodeMap) + if read.amended { + // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) + // (assuming the caller does not break out early), so a call to Range + // amortizes an entire copy of the map: we can promote the dirty copy + // immediately! 
+ m.mu.Lock() + read, _ = m.read.Load().(readOnlyNodeMap) + if read.amended { + read = readOnlyNodeMap{m: m.dirty} + m.read.Store(read) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *nodeMap) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(readOnlyNodeMap{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *nodeMap) dirtyLocked() { + if m.dirty != nil { + return + } + + read, _ := m.read.Load().(readOnlyNodeMap) + m.dirty = make(map[string]*entryNodeMap, len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } + } +} + +func (e *entryNodeMap) tryExpungeLocked() (isExpunged bool) { + p := atomic.LoadPointer(&e.p) + for p == nil { + if atomic.CompareAndSwapPointer(&e.p, nil, expungedNodeMap) { + return true + } + p = atomic.LoadPointer(&e.p) + } + return p == expungedNodeMap +} diff --git a/pkg/discoverer/k8s/service/nodemap_test.go b/pkg/discoverer/k8s/service/nodemap_test.go new file mode 100644 index 0000000000..44312d1460 --- /dev/null +++ b/pkg/discoverer/k8s/service/nodemap_test.go @@ -0,0 +1,664 @@ +// +// Copyright (C) 2019-2023 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package service + +// NOT IMPLEMENTED BELOW +// +// func Test_nodeMap_Load(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryNodeMap +// misses int +// } +// type want struct { +// wantValue *node.Node +// wantOk bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *node.Node, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotValue *node.Node, gotOk bool) error { +// if !reflect.DeepEqual(gotValue, w.wantValue) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotValue, w.wantValue) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// 
defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &nodeMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotValue, gotOk := m.Load(test.args.key) +// if err := checkFunc(test.want, gotValue, gotOk); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_nodeMap_Store(t *testing.T) { +// type args struct { +// key string +// value *node.Node +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryNodeMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// 
test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &nodeMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Store(test.args.key, test.args.value) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_nodeMap_LoadOrStore(t *testing.T) { +// type args struct { +// key string +// value *node.Node +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryNodeMap +// misses int +// } +// type want struct { +// wantActual *node.Node +// wantLoaded bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *node.Node, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotActual *node.Node, gotLoaded bool) error { +// if !reflect.DeepEqual(gotActual, w.wantActual) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotActual, w.wantActual) +// } +// if !reflect.DeepEqual(gotLoaded, w.wantLoaded) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoaded, w.wantLoaded) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// 
dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &nodeMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotActual, gotLoaded := m.LoadOrStore(test.args.key, test.args.value) +// if err := checkFunc(test.want, gotActual, gotLoaded); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_nodeMap_LoadAndDelete(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryNodeMap +// misses int +// } +// type want struct { +// wantValue *node.Node +// wantLoaded bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *node.Node, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotValue *node.Node, gotLoaded bool) error { +// if !reflect.DeepEqual(gotValue, w.wantValue) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotValue, w.wantValue) +// } +// if !reflect.DeepEqual(gotLoaded, w.wantLoaded) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoaded, w.wantLoaded) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// 
}, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &nodeMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotValue, gotLoaded := m.LoadAndDelete(test.args.key) +// if err := checkFunc(test.want, gotValue, gotLoaded); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_nodeMap_Delete(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryNodeMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// 
args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &nodeMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Delete(test.args.key) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_nodeMap_Range(t *testing.T) { +// type args struct { +// f func(key string, value *node.Node) bool +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryNodeMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", 
+// args: args { +// f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &nodeMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Range(test.args.f) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/pkg/discoverer/k8s/service/nodemetricsmap.go b/pkg/discoverer/k8s/service/nodemetricsmap.go new file mode 100644 index 0000000000..4199e31ee8 --- /dev/null +++ b/pkg/discoverer/k8s/service/nodemetricsmap.go @@ -0,0 +1,392 @@ +// +// Copyright (C) 2019-2023 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package service + +import ( + "sync" + "sync/atomic" + "unsafe" + + mnode "github.com/vdaas/vald/internal/k8s/metrics/node" +) + +// Map is like a Go map[interface{}]interface{} but is safe for concurrent use +// by multiple goroutines without additional locking or coordination. +// Loads, stores, and deletes run in amortized constant time. +// +// The Map type is specialized. Most code should use a plain Go map instead, +// with separate locking or coordination, for better type safety and to make it +// easier to maintain other invariants along with the map content. +// +// The Map type is optimized for two common use cases: (1) when the entry for a given +// key is only ever written once but read many times, as in caches that only grow, +// or (2) when multiple goroutines read, write, and overwrite entries for disjoint +// sets of keys. In these two cases, use of a Map may significantly reduce lock +// contention compared to a Go map paired with a separate Mutex or RWMutex. +// +// The zero Map is empty and ready for use. A Map must not be copied after first use. +type nodeMetricsMap struct { + mu sync.Mutex + + // read contains the portion of the map's contents that are safe for + // concurrent access (with or without mu held). + // + // The read field itself is always safe to load, but must only be stored with + // mu held. + // + // Entries stored in read may be updated concurrently without mu, but updating + // a previously-expunged entry requires that the entry be copied to the dirty + // map and unexpunged with mu held. 
+ read atomic.Value // readOnly + + // dirty contains the portion of the map's contents that require mu to be + // held. To ensure that the dirty map can be promoted to the read map quickly, + // it also includes all of the non-expunged entries in the read map. + // + // Expunged entries are not stored in the dirty map. An expunged entry in the + // clean map must be unexpunged and added to the dirty map before a new value + // can be stored to it. + // + // If the dirty map is nil, the next write to the map will initialize it by + // making a shallow copy of the clean map, omitting stale entries. + dirty map[string]*entryNodeMetricsMap + + // misses counts the number of loads since the read map was last updated that + // needed to lock mu to determine whether the key was present. + // + // Once enough misses have occurred to cover the cost of copying the dirty + // map, the dirty map will be promoted to the read map (in the unamended + // state) and the next store to the map will make a new dirty copy. + misses int +} + +// readOnly is an immutable struct stored atomically in the Map.read field. +type readOnlyNodeMetricsMap struct { + m map[string]*entryNodeMetricsMap + amended bool // true if the dirty map contains some key not in m. +} + +// expunged is an arbitrary pointer that marks entries which have been deleted +// from the dirty map. +// skipcq: GSC-G103 +var expungedNodeMetricsMap = unsafe.Pointer(new(mnode.Node)) + +// An entry is a slot in the map corresponding to a particular key. +type entryNodeMetricsMap struct { + // p points to the interface{} value stored for the entry. + // + // If p == nil, the entry has been deleted and m.dirty == nil. + // + // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry + // is missing from m.dirty. + // + // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty + // != nil, in m.dirty[key]. 
+ // + // An entry can be deleted by atomic replacement with nil: when m.dirty is + // next created, it will atomically replace nil with expunged and leave + // m.dirty[key] unset. + // + // An entry's associated value can be updated by atomic replacement, provided + // p != expunged. If p == expunged, an entry's associated value can be updated + // only after first setting m.dirty[key] = e so that lookups using the dirty + // map find the entry. + p unsafe.Pointer // *interface{} +} + +func newEntryNodeMetricsMap(i mnode.Node) *entryNodeMetricsMap { + // skipcq: GSC-G103 + return &entryNodeMetricsMap{p: unsafe.Pointer(&i)} +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (m *nodeMetricsMap) Load(key string) (value mnode.Node, ok bool) { + read, _ := m.read.Load().(readOnlyNodeMetricsMap) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + // Avoid reporting a spurious miss if m.dirty got promoted while we were + // blocked on m.mu. (If further loads of the same key will not miss, it's + // not worth copying the dirty map for this key.) + read, _ = m.read.Load().(readOnlyNodeMetricsMap) + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return value, false + } + return e.load() +} + +func (e *entryNodeMetricsMap) load() (value mnode.Node, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expungedNodeMetricsMap { + return value, false + } + return *(*mnode.Node)(p), true +} + +// Store sets the value for a key. 
+func (m *nodeMetricsMap) Store(key string, value mnode.Node) { + read, _ := m.read.Load().(readOnlyNodeMetricsMap) + if e, ok := read.m[key]; ok && e.tryStore(&value) { + return + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnlyNodeMetricsMap) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + // The entry was previously expunged, which implies that there is a + // non-nil dirty map and this entry is not in it. + m.dirty[key] = e + } + e.storeLocked(&value) + } else if e, ok := m.dirty[key]; ok { + e.storeLocked(&value) + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnlyNodeMetricsMap{m: read.m, amended: true}) + } + m.dirty[key] = newEntryNodeMetricsMap(value) + } + m.mu.Unlock() +} + +// tryStore stores a value if the entry has not been expunged. +// +// If the entry is expunged, tryStore returns false and leaves the entry +// unchanged. +func (e *entryNodeMetricsMap) tryStore(i *mnode.Node) bool { + for { + p := atomic.LoadPointer(&e.p) + if p == expungedNodeMetricsMap { + return false + } + // skipcq: GSC-G103 + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return true + } + } +} + +// unexpungeLocked ensures that the entry is not marked as expunged. +// +// If the entry was previously expunged, it must be added to the dirty map +// before m.mu is unlocked. +func (e *entryNodeMetricsMap) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, expungedNodeMetricsMap, nil) +} + +// storeLocked unconditionally stores a value to the entry. +// +// The entry must be known not to be expunged. +func (e *entryNodeMetricsMap) storeLocked(i *mnode.Node) { + // skipcq: GSC-G103 + atomic.StorePointer(&e.p, unsafe.Pointer(i)) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. 
+// The loaded result is true if the value was loaded, false if stored. +func (m *nodeMetricsMap) LoadOrStore(key string, value mnode.Node) (actual mnode.Node, loaded bool) { + // Avoid locking if it's a clean hit. + read, _ := m.read.Load().(readOnlyNodeMetricsMap) + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnlyNodeMetricsMap) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnlyNodeMetricsMap{m: read.m, amended: true}) + } + m.dirty[key] = newEntryNodeMetricsMap(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +// tryLoadOrStore atomically loads or stores a value if the entry is not +// expunged. +// +// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and +// returns with ok==false. +func (e *entryNodeMetricsMap) tryLoadOrStore(i mnode.Node) (actual mnode.Node, loaded, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == expungedNodeMetricsMap { + return actual, false, false + } + if p != nil { + return *(*mnode.Node)(p), true, true + } + + // Copy the interface after the first load to make this method more amenable + // to escape analysis: if we hit the "load" path or the entry is expunged, we + // shouldn't bother heap-allocating. 
+ ic := i + for { + // skipcq: GSC-G103 + if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { + return i, false, true + } + p = atomic.LoadPointer(&e.p) + if p == expungedNodeMetricsMap { + return actual, false, false + } + if p != nil { + return *(*mnode.Node)(p), true, true + } + } +} + +// Delete deletes the value for a key. +func (m *nodeMetricsMap) Delete(key string) { + read, _ := m.read.Load().(readOnlyNodeMetricsMap) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read, _ = m.read.Load().(readOnlyNodeMetricsMap) + e, ok = read.m[key] + if !ok && read.amended { + delete(m.dirty, key) + } + m.mu.Unlock() + } + if ok { + e.delete() + } +} + +func (e *entryNodeMetricsMap) delete() (hadValue bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expungedNodeMetricsMap { + return false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return true + } + } +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *nodeMetricsMap) Range(f func(key string, value mnode.Node) bool) { + // We need to be able to iterate over all of the keys that were already + // present at the start of the call to Range. + // If read.amended is false, then read.m satisfies that property without + // requiring us to hold m.mu for a long time. + read, _ := m.read.Load().(readOnlyNodeMetricsMap) + if read.amended { + // m.dirty contains keys not in read.m. 
Fortunately, Range is already O(N) + // (assuming the caller does not break out early), so a call to Range + // amortizes an entire copy of the map: we can promote the dirty copy + // immediately! + m.mu.Lock() + read, _ = m.read.Load().(readOnlyNodeMetricsMap) + if read.amended { + read = readOnlyNodeMetricsMap{m: m.dirty} + m.read.Store(read) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *nodeMetricsMap) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(readOnlyNodeMetricsMap{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *nodeMetricsMap) dirtyLocked() { + if m.dirty != nil { + return + } + + read, _ := m.read.Load().(readOnlyNodeMetricsMap) + m.dirty = make(map[string]*entryNodeMetricsMap, len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } + } +} + +func (e *entryNodeMetricsMap) tryExpungeLocked() (isExpunged bool) { + p := atomic.LoadPointer(&e.p) + for p == nil { + if atomic.CompareAndSwapPointer(&e.p, nil, expungedNodeMetricsMap) { + return true + } + p = atomic.LoadPointer(&e.p) + } + return p == expungedNodeMetricsMap +} diff --git a/pkg/discoverer/k8s/service/nodemetricsmap_test.go b/pkg/discoverer/k8s/service/nodemetricsmap_test.go new file mode 100644 index 0000000000..e9ca5570a1 --- /dev/null +++ b/pkg/discoverer/k8s/service/nodemetricsmap_test.go @@ -0,0 +1,553 @@ +// +// Copyright (C) 2019-2023 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package service + +// NOT IMPLEMENTED BELOW +// +// func Test_nodeMetricsMap_Load(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryNodeMetricsMap +// misses int +// } +// type want struct { +// wantValue mnode.Node +// wantOk bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, mnode.Node, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotValue mnode.Node, gotOk bool) error { +// if !reflect.DeepEqual(gotValue, w.wantValue) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotValue, w.wantValue) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// 
beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &nodeMetricsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotValue, gotOk := m.Load(test.args.key) +// if err := checkFunc(test.want, gotValue, gotOk); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_nodeMetricsMap_Store(t *testing.T) { +// type args struct { +// key string +// value mnode.Node +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryNodeMetricsMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// 
dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &nodeMetricsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Store(test.args.key, test.args.value) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_nodeMetricsMap_LoadOrStore(t *testing.T) { +// type args struct { +// key string +// value mnode.Node +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryNodeMetricsMap +// misses int +// } +// type want struct { +// wantActual mnode.Node +// wantLoaded bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, mnode.Node, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotActual mnode.Node, gotLoaded bool) error { +// if !reflect.DeepEqual(gotActual, w.wantActual) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotActual, w.wantActual) +// } +// if !reflect.DeepEqual(gotLoaded, w.wantLoaded) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoaded, w.wantLoaded) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// value:nil, 
+// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &nodeMetricsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotActual, gotLoaded := m.LoadOrStore(test.args.key, test.args.value) +// if err := checkFunc(test.want, gotActual, gotLoaded); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_nodeMetricsMap_Delete(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryNodeMetricsMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // 
TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &nodeMetricsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Delete(test.args.key) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_nodeMetricsMap_Range(t *testing.T) { +// type args struct { +// f func(key string, value mnode.Node) bool +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryNodeMetricsMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } 
+// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &nodeMetricsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Range(test.args.f) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/pkg/discoverer/k8s/service/podmetricsmap.go b/pkg/discoverer/k8s/service/podmetricsmap.go new file mode 100644 index 0000000000..480423f24e --- /dev/null +++ b/pkg/discoverer/k8s/service/podmetricsmap.go @@ -0,0 +1,392 @@ +// +// Copyright (C) 2019-2023 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package service + +import ( + "sync" + "sync/atomic" + "unsafe" + + mpod "github.com/vdaas/vald/internal/k8s/metrics/pod" +) + +// Map is like a Go map[interface{}]interface{} but is safe for concurrent use +// by multiple goroutines without additional locking or coordination. +// Loads, stores, and deletes run in amortized constant time. +// +// The Map type is specialized. Most code should use a plain Go map instead, +// with separate locking or coordination, for better type safety and to make it +// easier to maintain other invariants along with the map content. +// +// The Map type is optimized for two common use cases: (1) when the entry for a given +// key is only ever written once but read many times, as in caches that only grow, +// or (2) when multiple goroutines read, write, and overwrite entries for disjoint +// sets of keys. In these two cases, use of a Map may significantly reduce lock +// contention compared to a Go map paired with a separate Mutex or RWMutex. +// +// The zero Map is empty and ready for use. A Map must not be copied after first use. +type podMetricsMap struct { + mu sync.Mutex + + // read contains the portion of the map's contents that are safe for + // concurrent access (with or without mu held). + // + // The read field itself is always safe to load, but must only be stored with + // mu held. + // + // Entries stored in read may be updated concurrently without mu, but updating + // a previously-expunged entry requires that the entry be copied to the dirty + // map and unexpunged with mu held. 
+ read atomic.Value // readOnly + + // dirty contains the portion of the map's contents that require mu to be + // held. To ensure that the dirty map can be promoted to the read map quickly, + // it also includes all of the non-expunged entries in the read map. + // + // Expunged entries are not stored in the dirty map. An expunged entry in the + // clean map must be unexpunged and added to the dirty map before a new value + // can be stored to it. + // + // If the dirty map is nil, the next write to the map will initialize it by + // making a shallow copy of the clean map, omitting stale entries. + dirty map[string]*entryPodMetricsMap + + // misses counts the number of loads since the read map was last updated that + // needed to lock mu to determine whether the key was present. + // + // Once enough misses have occurred to cover the cost of copying the dirty + // map, the dirty map will be promoted to the read map (in the unamended + // state) and the next store to the map will make a new dirty copy. + misses int +} + +// readOnly is an immutable struct stored atomically in the Map.read field. +type readOnlyPodMetricsMap struct { + m map[string]*entryPodMetricsMap + amended bool // true if the dirty map contains some key not in m. +} + +// expunged is an arbitrary pointer that marks entries which have been deleted +// from the dirty map. +// skipcq: GSC-G103 +var expungedPodMetricsMap = unsafe.Pointer(new(mpod.Pod)) + +// An entry is a slot in the map corresponding to a particular key. +type entryPodMetricsMap struct { + // p points to the interface{} value stored for the entry. + // + // If p == nil, the entry has been deleted and m.dirty == nil. + // + // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry + // is missing from m.dirty. + // + // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty + // != nil, in m.dirty[key]. 
+ // + // An entry can be deleted by atomic replacement with nil: when m.dirty is + // next created, it will atomically replace nil with expunged and leave + // m.dirty[key] unset. + // + // An entry's associated value can be updated by atomic replacement, provided + // p != expunged. If p == expunged, an entry's associated value can be updated + // only after first setting m.dirty[key] = e so that lookups using the dirty + // map find the entry. + p unsafe.Pointer // *interface{} +} + +func newEntryPodMetricsMap(i mpod.Pod) *entryPodMetricsMap { + // skipcq: GSC-G103 + return &entryPodMetricsMap{p: unsafe.Pointer(&i)} +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (m *podMetricsMap) Load(key string) (value mpod.Pod, ok bool) { + read, _ := m.read.Load().(readOnlyPodMetricsMap) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + // Avoid reporting a spurious miss if m.dirty got promoted while we were + // blocked on m.mu. (If further loads of the same key will not miss, it's + // not worth copying the dirty map for this key.) + read, _ = m.read.Load().(readOnlyPodMetricsMap) + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return value, false + } + return e.load() +} + +func (e *entryPodMetricsMap) load() (value mpod.Pod, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expungedPodMetricsMap { + return value, false + } + return *(*mpod.Pod)(p), true +} + +// Store sets the value for a key. 
+func (m *podMetricsMap) Store(key string, value mpod.Pod) { + read, _ := m.read.Load().(readOnlyPodMetricsMap) + if e, ok := read.m[key]; ok && e.tryStore(&value) { + return + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnlyPodMetricsMap) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + // The entry was previously expunged, which implies that there is a + // non-nil dirty map and this entry is not in it. + m.dirty[key] = e + } + e.storeLocked(&value) + } else if e, ok := m.dirty[key]; ok { + e.storeLocked(&value) + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnlyPodMetricsMap{m: read.m, amended: true}) + } + m.dirty[key] = newEntryPodMetricsMap(value) + } + m.mu.Unlock() +} + +// tryStore stores a value if the entry has not been expunged. +// +// If the entry is expunged, tryStore returns false and leaves the entry +// unchanged. +func (e *entryPodMetricsMap) tryStore(i *mpod.Pod) bool { + for { + p := atomic.LoadPointer(&e.p) + if p == expungedPodMetricsMap { + return false + } + // skipcq: GSC-G103 + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return true + } + } +} + +// unexpungeLocked ensures that the entry is not marked as expunged. +// +// If the entry was previously expunged, it must be added to the dirty map +// before m.mu is unlocked. +func (e *entryPodMetricsMap) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, expungedPodMetricsMap, nil) +} + +// storeLocked unconditionally stores a value to the entry. +// +// The entry must be known not to be expunged. +func (e *entryPodMetricsMap) storeLocked(i *mpod.Pod) { + // skipcq: GSC-G103 + atomic.StorePointer(&e.p, unsafe.Pointer(i)) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. 
+// The loaded result is true if the value was loaded, false if stored. +func (m *podMetricsMap) LoadOrStore(key string, value mpod.Pod) (actual mpod.Pod, loaded bool) { + // Avoid locking if it's a clean hit. + read, _ := m.read.Load().(readOnlyPodMetricsMap) + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnlyPodMetricsMap) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnlyPodMetricsMap{m: read.m, amended: true}) + } + m.dirty[key] = newEntryPodMetricsMap(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +// tryLoadOrStore atomically loads or stores a value if the entry is not +// expunged. +// +// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and +// returns with ok==false. +func (e *entryPodMetricsMap) tryLoadOrStore(i mpod.Pod) (actual mpod.Pod, loaded, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == expungedPodMetricsMap { + return actual, false, false + } + if p != nil { + return *(*mpod.Pod)(p), true, true + } + + // Copy the interface after the first load to make this method more amenable + // to escape analysis: if we hit the "load" path or the entry is expunged, we + // shouldn't bother heap-allocating. 
+ ic := i + for { + // skipcq: GSC-G103 + if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { + return i, false, true + } + p = atomic.LoadPointer(&e.p) + if p == expungedPodMetricsMap { + return actual, false, false + } + if p != nil { + return *(*mpod.Pod)(p), true, true + } + } +} + +// Delete deletes the value for a key. +func (m *podMetricsMap) Delete(key string) { + read, _ := m.read.Load().(readOnlyPodMetricsMap) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read, _ = m.read.Load().(readOnlyPodMetricsMap) + e, ok = read.m[key] + if !ok && read.amended { + delete(m.dirty, key) + } + m.mu.Unlock() + } + if ok { + e.delete() + } +} + +func (e *entryPodMetricsMap) delete() (hadValue bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expungedPodMetricsMap { + return false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return true + } + } +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *podMetricsMap) Range(f func(key string, value mpod.Pod) bool) { + // We need to be able to iterate over all of the keys that were already + // present at the start of the call to Range. + // If read.amended is false, then read.m satisfies that property without + // requiring us to hold m.mu for a long time. + read, _ := m.read.Load().(readOnlyPodMetricsMap) + if read.amended { + // m.dirty contains keys not in read.m. 
Fortunately, Range is already O(N) + // (assuming the caller does not break out early), so a call to Range + // amortizes an entire copy of the map: we can promote the dirty copy + // immediately! + m.mu.Lock() + read, _ = m.read.Load().(readOnlyPodMetricsMap) + if read.amended { + read = readOnlyPodMetricsMap{m: m.dirty} + m.read.Store(read) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *podMetricsMap) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(readOnlyPodMetricsMap{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *podMetricsMap) dirtyLocked() { + if m.dirty != nil { + return + } + + read, _ := m.read.Load().(readOnlyPodMetricsMap) + m.dirty = make(map[string]*entryPodMetricsMap, len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } + } +} + +func (e *entryPodMetricsMap) tryExpungeLocked() (isExpunged bool) { + p := atomic.LoadPointer(&e.p) + for p == nil { + if atomic.CompareAndSwapPointer(&e.p, nil, expungedPodMetricsMap) { + return true + } + p = atomic.LoadPointer(&e.p) + } + return p == expungedPodMetricsMap +} diff --git a/pkg/discoverer/k8s/service/podmetricsmap_test.go b/pkg/discoverer/k8s/service/podmetricsmap_test.go new file mode 100644 index 0000000000..b66256322c --- /dev/null +++ b/pkg/discoverer/k8s/service/podmetricsmap_test.go @@ -0,0 +1,553 @@ +// +// Copyright (C) 2019-2023 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package service + +// NOT IMPLEMENTED BELOW +// +// func Test_podMetricsMap_Load(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryPodMetricsMap +// misses int +// } +// type want struct { +// wantValue mpod.Pod +// wantOk bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, mpod.Pod, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotValue mpod.Pod, gotOk bool) error { +// if !reflect.DeepEqual(gotValue, w.wantValue) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotValue, w.wantValue) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// 
beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &podMetricsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotValue, gotOk := m.Load(test.args.key) +// if err := checkFunc(test.want, gotValue, gotOk); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_podMetricsMap_Store(t *testing.T) { +// type args struct { +// key string +// value mpod.Pod +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryPodMetricsMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// 
dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &podMetricsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Store(test.args.key, test.args.value) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_podMetricsMap_LoadOrStore(t *testing.T) { +// type args struct { +// key string +// value mpod.Pod +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryPodMetricsMap +// misses int +// } +// type want struct { +// wantActual mpod.Pod +// wantLoaded bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, mpod.Pod, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotActual mpod.Pod, gotLoaded bool) error { +// if !reflect.DeepEqual(gotActual, w.wantActual) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotActual, w.wantActual) +// } +// if !reflect.DeepEqual(gotLoaded, w.wantLoaded) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoaded, w.wantLoaded) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// value:nil, +// }, +// 
fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &podMetricsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotActual, gotLoaded := m.LoadOrStore(test.args.key, test.args.value) +// if err := checkFunc(test.want, gotActual, gotLoaded); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_podMetricsMap_Delete(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryPodMetricsMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test 
cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &podMetricsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Delete(test.args.key) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_podMetricsMap_Range(t *testing.T) { +// type args struct { +// f func(key string, value mpod.Pod) bool +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryPodMetricsMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := 
[]test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &podMetricsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Range(test.args.f) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/pkg/discoverer/k8s/service/podsmap.go b/pkg/discoverer/k8s/service/podsmap.go new file mode 100644 index 0000000000..aac514a26f --- /dev/null +++ b/pkg/discoverer/k8s/service/podsmap.go @@ -0,0 +1,392 @@ +// +// Copyright (C) 2019-2023 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package service + +import ( + "sync" + "sync/atomic" + "unsafe" + + "github.com/vdaas/vald/internal/k8s/pod" +) + +// Map is like a Go map[interface{}]interface{} but is safe for concurrent use +// by multiple goroutines without additional locking or coordination. +// Loads, stores, and deletes run in amortized constant time. +// +// The Map type is specialized. Most code should use a plain Go map instead, +// with separate locking or coordination, for better type safety and to make it +// easier to maintain other invariants along with the map content. +// +// The Map type is optimized for two common use cases: (1) when the entry for a given +// key is only ever written once but read many times, as in caches that only grow, +// or (2) when multiple goroutines read, write, and overwrite entries for disjoint +// sets of keys. In these two cases, use of a Map may significantly reduce lock +// contention compared to a Go map paired with a separate Mutex or RWMutex. +// +// The zero Map is empty and ready for use. A Map must not be copied after first use. +type podsMap struct { + mu sync.Mutex + + // read contains the portion of the map's contents that are safe for + // concurrent access (with or without mu held). + // + // The read field itself is always safe to load, but must only be stored with + // mu held. + // + // Entries stored in read may be updated concurrently without mu, but updating + // a previously-expunged entry requires that the entry be copied to the dirty + // map and unexpunged with mu held. 
+ read atomic.Value // readOnly + + // dirty contains the portion of the map's contents that require mu to be + // held. To ensure that the dirty map can be promoted to the read map quickly, + // it also includes all of the non-expunged entries in the read map. + // + // Expunged entries are not stored in the dirty map. An expunged entry in the + // clean map must be unexpunged and added to the dirty map before a new value + // can be stored to it. + // + // If the dirty map is nil, the next write to the map will initialize it by + // making a shallow copy of the clean map, omitting stale entries. + dirty map[string]*entryPodsMap + + // misses counts the number of loads since the read map was last updated that + // needed to lock mu to determine whether the key was present. + // + // Once enough misses have occurred to cover the cost of copying the dirty + // map, the dirty map will be promoted to the read map (in the unamended + // state) and the next store to the map will make a new dirty copy. + misses int +} + +// readOnly is an immutable struct stored atomically in the Map.read field. +type readOnlyPodsMap struct { + m map[string]*entryPodsMap + amended bool // true if the dirty map contains some key not in m. +} + +// expunged is an arbitrary pointer that marks entries which have been deleted +// from the dirty map. +// skipcq: GSC-G103 +var expungedPodsMap = unsafe.Pointer(new([]pod.Pod)) + +// An entry is a slot in the map corresponding to a particular key. +type entryPodsMap struct { + // p points to the interface{} value stored for the entry. + // + // If p == nil, the entry has been deleted and m.dirty == nil. + // + // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry + // is missing from m.dirty. + // + // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty + // != nil, in m.dirty[key]. 
+ // + // An entry can be deleted by atomic replacement with nil: when m.dirty is + // next created, it will atomically replace nil with expunged and leave + // m.dirty[key] unset. + // + // An entry's associated value can be updated by atomic replacement, provided + // p != expunged. If p == expunged, an entry's associated value can be updated + // only after first setting m.dirty[key] = e so that lookups using the dirty + // map find the entry. + p unsafe.Pointer // *interface{} +} + +func newEntryPodsMap(i []pod.Pod) *entryPodsMap { + // skipcq: GSC-G103 + return &entryPodsMap{p: unsafe.Pointer(&i)} +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (m *podsMap) Load(key string) (value []pod.Pod, ok bool) { + read, _ := m.read.Load().(readOnlyPodsMap) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + // Avoid reporting a spurious miss if m.dirty got promoted while we were + // blocked on m.mu. (If further loads of the same key will not miss, it's + // not worth copying the dirty map for this key.) + read, _ = m.read.Load().(readOnlyPodsMap) + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return value, false + } + return e.load() +} + +func (e *entryPodsMap) load() (value []pod.Pod, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expungedPodsMap { + return value, false + } + return *(*[]pod.Pod)(p), true +} + +// Store sets the value for a key. 
+func (m *podsMap) Store(key string, value []pod.Pod) { + read, _ := m.read.Load().(readOnlyPodsMap) + if e, ok := read.m[key]; ok && e.tryStore(&value) { + return + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnlyPodsMap) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + // The entry was previously expunged, which implies that there is a + // non-nil dirty map and this entry is not in it. + m.dirty[key] = e + } + e.storeLocked(&value) + } else if e, ok := m.dirty[key]; ok { + e.storeLocked(&value) + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnlyPodsMap{m: read.m, amended: true}) + } + m.dirty[key] = newEntryPodsMap(value) + } + m.mu.Unlock() +} + +// tryStore stores a value if the entry has not been expunged. +// +// If the entry is expunged, tryStore returns false and leaves the entry +// unchanged. +func (e *entryPodsMap) tryStore(i *[]pod.Pod) bool { + for { + p := atomic.LoadPointer(&e.p) + if p == expungedPodsMap { + return false + } + // skipcq: GSC-G103 + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return true + } + } +} + +// unexpungeLocked ensures that the entry is not marked as expunged. +// +// If the entry was previously expunged, it must be added to the dirty map +// before m.mu is unlocked. +func (e *entryPodsMap) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, expungedPodsMap, nil) +} + +// storeLocked unconditionally stores a value to the entry. +// +// The entry must be known not to be expunged. +func (e *entryPodsMap) storeLocked(i *[]pod.Pod) { + // skipcq: GSC-G103 + atomic.StorePointer(&e.p, unsafe.Pointer(i)) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. 
+func (m *podsMap) LoadOrStore(key string, value []pod.Pod) (actual []pod.Pod, loaded bool) { + // Avoid locking if it's a clean hit. + read, _ := m.read.Load().(readOnlyPodsMap) + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnlyPodsMap) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnlyPodsMap{m: read.m, amended: true}) + } + m.dirty[key] = newEntryPodsMap(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +// tryLoadOrStore atomically loads or stores a value if the entry is not +// expunged. +// +// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and +// returns with ok==false. +func (e *entryPodsMap) tryLoadOrStore(i []pod.Pod) (actual []pod.Pod, loaded, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == expungedPodsMap { + return actual, false, false + } + if p != nil { + return *(*[]pod.Pod)(p), true, true + } + + // Copy the interface after the first load to make this method more amenable + // to escape analysis: if we hit the "load" path or the entry is expunged, we + // shouldn't bother heap-allocating. + ic := i + for { + // skipcq: GSC-G103 + if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { + return i, false, true + } + p = atomic.LoadPointer(&e.p) + if p == expungedPodsMap { + return actual, false, false + } + if p != nil { + return *(*[]pod.Pod)(p), true, true + } + } +} + +// Delete deletes the value for a key. 
+func (m *podsMap) Delete(key string) { + read, _ := m.read.Load().(readOnlyPodsMap) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read, _ = m.read.Load().(readOnlyPodsMap) + e, ok = read.m[key] + if !ok && read.amended { + delete(m.dirty, key) + } + m.mu.Unlock() + } + if ok { + e.delete() + } +} + +func (e *entryPodsMap) delete() (hadValue bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expungedPodsMap { + return false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return true + } + } +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *podsMap) Range(f func(key string, value []pod.Pod) bool) { + // We need to be able to iterate over all of the keys that were already + // present at the start of the call to Range. + // If read.amended is false, then read.m satisfies that property without + // requiring us to hold m.mu for a long time. + read, _ := m.read.Load().(readOnlyPodsMap) + if read.amended { + // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) + // (assuming the caller does not break out early), so a call to Range + // amortizes an entire copy of the map: we can promote the dirty copy + // immediately! 
+ m.mu.Lock() + read, _ = m.read.Load().(readOnlyPodsMap) + if read.amended { + read = readOnlyPodsMap{m: m.dirty} + m.read.Store(read) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *podsMap) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(readOnlyPodsMap{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *podsMap) dirtyLocked() { + if m.dirty != nil { + return + } + + read, _ := m.read.Load().(readOnlyPodsMap) + m.dirty = make(map[string]*entryPodsMap, len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } + } +} + +func (e *entryPodsMap) tryExpungeLocked() (isExpunged bool) { + p := atomic.LoadPointer(&e.p) + for p == nil { + if atomic.CompareAndSwapPointer(&e.p, nil, expungedPodsMap) { + return true + } + p = atomic.LoadPointer(&e.p) + } + return p == expungedPodsMap +} diff --git a/pkg/discoverer/k8s/service/podsmap_test.go b/pkg/discoverer/k8s/service/podsmap_test.go new file mode 100644 index 0000000000..6a2e8ce5f8 --- /dev/null +++ b/pkg/discoverer/k8s/service/podsmap_test.go @@ -0,0 +1,553 @@ +// +// Copyright (C) 2019-2023 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package service + +// NOT IMPLEMENTED BELOW +// +// func Test_podsMap_Load(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryPodsMap +// misses int +// } +// type want struct { +// wantValue []pod.Pod +// wantOk bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, []pod.Pod, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotValue []pod.Pod, gotOk bool) error { +// if !reflect.DeepEqual(gotValue, w.wantValue) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotValue, w.wantValue) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer 
test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &podsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotValue, gotOk := m.Load(test.args.key) +// if err := checkFunc(test.want, gotValue, gotOk); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_podsMap_Store(t *testing.T) { +// type args struct { +// key string +// value []pod.Pod +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryPodsMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// 
test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &podsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Store(test.args.key, test.args.value) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_podsMap_LoadOrStore(t *testing.T) { +// type args struct { +// key string +// value []pod.Pod +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryPodsMap +// misses int +// } +// type want struct { +// wantActual []pod.Pod +// wantLoaded bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, []pod.Pod, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotActual []pod.Pod, gotLoaded bool) error { +// if !reflect.DeepEqual(gotActual, w.wantActual) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotActual, w.wantActual) +// } +// if !reflect.DeepEqual(gotLoaded, w.wantLoaded) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoaded, w.wantLoaded) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, 
+// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &podsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotActual, gotLoaded := m.LoadOrStore(test.args.key, test.args.value) +// if err := checkFunc(test.want, gotActual, gotLoaded); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_podsMap_Delete(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryPodsMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// 
fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &podsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Delete(test.args.key) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_podsMap_Range(t *testing.T) { +// type args struct { +// f func(key string, value []pod.Pod) bool +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryPodsMap +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// f:nil, +// }, 
+// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &podsMap{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Range(test.args.f) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/pkg/gateway/lb/handler/grpc/aggregation.go b/pkg/gateway/lb/handler/grpc/aggregation.go index 3779201327..a3abe71bf5 100644 --- a/pkg/gateway/lb/handler/grpc/aggregation.go +++ b/pkg/gateway/lb/handler/grpc/aggregation.go @@ -35,7 +35,6 @@ import ( "github.com/vdaas/vald/internal/observability/trace" "github.com/vdaas/vald/internal/safety" "github.com/vdaas/vald/internal/slices" - valdsync "github.com/vdaas/vald/internal/sync" ) type Aggregator interface { @@ -366,7 +365,7 @@ type valdStdAggr struct { dch chan DistPayload closed atomic.Bool maxDist atomic.Value - visited valdsync.Map[string, any] + visited sync.Map result []*payload.Object_Distance cancel context.CancelFunc } @@ -496,7 +495,7 @@ type valdPairingHeapAggr struct { num int ph *PairingHeap mu sync.Mutex - visited valdsync.Map[string, any] + visited sync.Map result []*payload.Object_Distance } diff --git a/pkg/gateway/lb/service/gateway.go b/pkg/gateway/lb/service/gateway.go index 1b046637e6..72446dd708 100644 --- a/pkg/gateway/lb/service/gateway.go 
+++ b/pkg/gateway/lb/service/gateway.go @@ -20,6 +20,7 @@ package service import ( "context" "reflect" + "sync" "sync/atomic" "github.com/vdaas/vald/apis/grpc/v1/vald" @@ -28,7 +29,6 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/observability/trace" - valdsync "github.com/vdaas/vald/internal/sync" ) type Gateway interface { @@ -102,7 +102,7 @@ func (g *gateway) DoMulti(ctx context.Context, num int, } else { limit = uint32(num) } - var visited valdsync.Map[string, any] + var visited sync.Map err = g.client.GetClient().OrderedRange(sctx, addrs, func(ictx context.Context, addr string, conn *grpc.ClientConn, diff --git a/pkg/manager/index/service/indexer.go b/pkg/manager/index/service/indexer.go index 365db870c6..2251c85cfe 100644 --- a/pkg/manager/index/service/indexer.go +++ b/pkg/manager/index/service/indexer.go @@ -21,6 +21,7 @@ import ( "context" "math" "reflect" + "sync" "sync/atomic" "time" @@ -35,7 +36,6 @@ import ( "github.com/vdaas/vald/internal/net/grpc/status" "github.com/vdaas/vald/internal/observability/trace" "github.com/vdaas/vald/internal/safety" - valdsync "github.com/vdaas/vald/internal/sync" ) type Indexer interface { @@ -54,9 +54,9 @@ type index struct { saveIndexDurationLimit time.Duration saveIndexWaitDuration time.Duration saveIndexTargetAddrCh chan string - schMap valdsync.Map[string, any] + schMap sync.Map concurrency int - indexInfos valdsync.Map[string, *payload.Info_Index_Count] + indexInfos indexInfos indexing atomic.Value // bool minUncommitted uint32 uuidsCount uint32 @@ -276,7 +276,7 @@ func (idx *index) loadInfos(ctx context.Context) (err error) { }() var u, ucu uint32 - var infoMap valdsync.Map[string, *payload.Info_Index_Count] + var infoMap indexInfos err = idx.client.GetClient().RangeConcurrent(ctx, len(idx.client.GetAddrs(ctx)), func(ctx context.Context, addr string, conn *grpc.ClientConn, copts ...grpc.CallOption, diff --git 
a/pkg/manager/index/service/indexinfos.go b/pkg/manager/index/service/indexinfos.go new file mode 100644 index 0000000000..7e08b9c59b --- /dev/null +++ b/pkg/manager/index/service/indexinfos.go @@ -0,0 +1,212 @@ +// +// Copyright (C) 2019-2023 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package service + +import ( + "sync" + "sync/atomic" + "unsafe" + + "github.com/vdaas/vald/apis/grpc/v1/payload" +) + +type indexInfos struct { + mu sync.Mutex + read atomic.Value + dirty map[string]*entryIndexInfos + misses int +} + +type readOnlyIndexInfos struct { + m map[string]*entryIndexInfos + amended bool +} + +// skipcq: GSC-G103 +var expungedIndexInfos = unsafe.Pointer(new(*payload.Info_Index_Count)) + +type entryIndexInfos struct { + p unsafe.Pointer +} + +func newEntryIndexInfos(i *payload.Info_Index_Count) *entryIndexInfos { + // skipcq: GSC-G103 + return &entryIndexInfos{p: unsafe.Pointer(&i)} +} + +func (m *indexInfos) Load(key string) (value *payload.Info_Index_Count, ok bool) { + read, _ := m.read.Load().(readOnlyIndexInfos) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read, _ = m.read.Load().(readOnlyIndexInfos) + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return value, false + } + return e.load() +} + +func (e *entryIndexInfos) load() (value *payload.Info_Index_Count, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil 
|| p == expungedIndexInfos { + return value, false + } + return *(**payload.Info_Index_Count)(p), true +} + +func (m *indexInfos) Store(key string, value *payload.Info_Index_Count) { + read, _ := m.read.Load().(readOnlyIndexInfos) + if e, ok := read.m[key]; ok && e.tryStore(&value) { + return + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnlyIndexInfos) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + e.storeLocked(&value) + } else if e, ok := m.dirty[key]; ok { + e.storeLocked(&value) + } else { + if !read.amended { + m.dirtyLocked() + m.read.Store(readOnlyIndexInfos{m: read.m, amended: true}) + } + m.dirty[key] = newEntryIndexInfos(value) + } + m.mu.Unlock() +} + +func (e *entryIndexInfos) tryStore(i **payload.Info_Index_Count) bool { + for { + p := atomic.LoadPointer(&e.p) + if p == expungedIndexInfos { + return false + } + // skipcq: GSC-G103 + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return true + } + } +} + +func (e *entryIndexInfos) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, expungedIndexInfos, nil) +} + +func (e *entryIndexInfos) storeLocked(i **payload.Info_Index_Count) { + // skipcq: GSC-G103 + atomic.StorePointer(&e.p, unsafe.Pointer(i)) +} + +func (m *indexInfos) Delete(key string) { + read, _ := m.read.Load().(readOnlyIndexInfos) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read, _ = m.read.Load().(readOnlyIndexInfos) + e, ok = read.m[key] + if !ok && read.amended { + delete(m.dirty, key) + } + m.mu.Unlock() + } + if ok { + e.delete() + } +} + +func (e *entryIndexInfos) delete() (hadValue bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expungedIndexInfos { + return false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return true + } + } +} + +func (m *indexInfos) Range(f func(key string, value *payload.Info_Index_Count) bool) { + read, _ := m.read.Load().(readOnlyIndexInfos) + if read.amended { + 
m.mu.Lock() + read, _ = m.read.Load().(readOnlyIndexInfos) + if read.amended { + read = readOnlyIndexInfos{m: m.dirty} + m.read.Store(read) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *indexInfos) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(readOnlyIndexInfos{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *indexInfos) dirtyLocked() { + if m.dirty != nil { + return + } + + read, _ := m.read.Load().(readOnlyIndexInfos) + m.dirty = make(map[string]*entryIndexInfos, len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } + } +} + +func (e *entryIndexInfos) tryExpungeLocked() (isExpunged bool) { + p := atomic.LoadPointer(&e.p) + for p == nil { + if atomic.CompareAndSwapPointer(&e.p, nil, expungedIndexInfos) { + return true + } + p = atomic.LoadPointer(&e.p) + } + return p == expungedIndexInfos +} diff --git a/pkg/manager/index/service/indexinfos_test.go b/pkg/manager/index/service/indexinfos_test.go new file mode 100644 index 0000000000..0922d8fbce --- /dev/null +++ b/pkg/manager/index/service/indexinfos_test.go @@ -0,0 +1,439 @@ +// +// Copyright (C) 2019-2023 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package service + +// NOT IMPLEMENTED BELOW +// +// func Test_indexInfos_Load(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryIndexInfos +// misses int +// } +// type want struct { +// wantValue *payload.Info_Index_Count +// wantOk bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Info_Index_Count, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotValue *payload.Info_Index_Count, gotOk bool) error { +// if !reflect.DeepEqual(gotValue, w.wantValue) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotValue, w.wantValue) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, 
test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &indexInfos{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// gotValue, gotOk := m.Load(test.args.key) +// if err := checkFunc(test.want, gotValue, gotOk); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_indexInfos_Store(t *testing.T) { +// type args struct { +// key string +// value *payload.Info_Index_Count +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryIndexInfos +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// value:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer 
goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &indexInfos{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Store(test.args.key, test.args.value) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_indexInfos_Delete(t *testing.T) { +// type args struct { +// key string +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryIndexInfos +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// key:"", +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer 
goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &indexInfos{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Delete(test.args.key) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_indexInfos_Range(t *testing.T) { +// type args struct { +// f func(key string, value *payload.Info_Index_Count) bool +// } +// type fields struct { +// read atomic.Value +// dirty map[string]*entryIndexInfos +// misses int +// } +// type want struct { +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// f:nil, +// }, +// fields: fields { +// read:nil, +// dirty:nil, +// misses:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { 
+// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// m := &indexInfos{ +// read: test.fields.read, +// dirty: test.fields.dirty, +// misses: test.fields.misses, +// } +// +// m.Range(test.args.f) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// }