diff --git a/src/config/config.go b/src/config/config.go index 83ed972b..49b5e280 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -3,7 +3,7 @@ package config import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - stats "github.com/lyft/gostats" + "github.com/envoyproxy/ratelimit/src/stats" "golang.org/x/net/context" ) @@ -14,19 +14,10 @@ func (e RateLimitConfigError) Error() string { return string(e) } -// Stats for an individual rate limit config entry. -type RateLimitStats struct { - TotalHits stats.Counter - OverLimit stats.Counter - NearLimit stats.Counter - OverLimitWithLocalCache stats.Counter - WithinLimit stats.Counter -} - // Wrapper for an individual rate limit config entry which includes the defined limit and stats. type RateLimit struct { FullKey string - Stats RateLimitStats + Stats stats.RateLimitStats Limit *pb.RateLimitResponse_RateLimit } @@ -56,5 +47,5 @@ type RateLimitConfigLoader interface { // @param statsScope supplies the stats scope to use for limit stats during runtime. // @return a new configuration. // @throws RateLimitConfigError if the configuration could not be created. - Load(configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig + Load(configs []RateLimitConfigToLoad, manager stats.Manager) RateLimitConfig } diff --git a/src/config/config_impl.go b/src/config/config_impl.go index b5118f5e..6f1c67ba 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -6,7 +6,7 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - stats "github.com/lyft/gostats" + "github.com/envoyproxy/ratelimit/src/stats" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" "gopkg.in/yaml.v2" @@ -39,8 +39,8 @@ type rateLimitDomain struct { } type rateLimitConfigImpl struct { - domains map[string]*rateLimitDomain - statsScope stats.Scope + domains map[string]*rateLimitDomain + manager stats.Manager } var validKeys = map[string]bool{ @@ -53,30 +53,15 @@ var validKeys = map[string]bool{ "requests_per_unit": true, } -// Create new rate limit stats for a config entry. -// @param statsScope supplies the owning scope. -// @param key supplies the fully resolved key name of the entry. -// @return new stats. -func newRateLimitStats(statsScope stats.Scope, key string) RateLimitStats { - ret := RateLimitStats{} - ret.TotalHits = statsScope.NewCounter(key + ".total_hits") - ret.OverLimit = statsScope.NewCounter(key + ".over_limit") - ret.NearLimit = statsScope.NewCounter(key + ".near_limit") - ret.OverLimitWithLocalCache = statsScope.NewCounter(key + ".over_limit_with_local_cache") - ret.WithinLimit = statsScope.NewCounter(key + ".within_limit") - return ret -} - // Create a new rate limit config entry. // @param requestsPerUnit supplies the requests per unit of time for the entry. // @param unit supplies the unit of time for the entry. -// @param key supplies the fully resolved key name of the entry. -// @param scope supplies the owning scope. +// @param rlStats supplies the stats structure associated with the RateLimit // @return the new config entry. 
func NewRateLimit(
-	requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, key string, scope stats.Scope) *RateLimit {
+	requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats) *RateLimit {
 
-	return &RateLimit{FullKey: key, Stats: newRateLimitStats(scope, key), Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}}
+	return &RateLimit{FullKey: rlStats.String(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}}
 }
 
 // Dump an individual descriptor for debugging purposes.
@@ -104,10 +89,8 @@ func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimit
 // @param config supplies the config file that owns the descriptor.
 // @param parentKey supplies the fully resolved key name that owns this config level.
 // @param descriptors supplies the YAML descriptors to load.
-// @param statsScope supplies the owning scope.
+// @param manager supplies the stats manager that owns the stats scope.
-func (this *rateLimitDescriptor) loadDescriptors(
-	config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor,
-	statsScope stats.Scope) {
+func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor, manager stats.Manager) {
 
 	for _, descriptorConfig := range descriptors {
 		if descriptorConfig.Key == "" {
@@ -138,8 +121,7 @@
 			}
 			rateLimit = NewRateLimit(
-				descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), newParentKey,
-				statsScope)
+				descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), manager.NewStats(newParentKey))
 			rateLimitDebugString = fmt.Sprintf(
 				" ratelimit={requests_per_unit=%d, unit=%s}", rateLimit.Limit.RequestsPerUnit,
 				rateLimit.Limit.Unit.String())
@@ -148,8 +130,7 @@
 		logger.Debugf(
 			"loading descriptor: key=%s%s", newParentKey, rateLimitDebugString)
 		newDescriptor := &rateLimitDescriptor{map[string]*rateLimitDescriptor{}, rateLimit}
-		newDescriptor.loadDescriptors(
-			config, newParentKey+".", descriptorConfig.Descriptors, statsScope)
+		newDescriptor.loadDescriptors(config, newParentKey+".", descriptorConfig.Descriptors, manager)
 		this.descriptors[finalKey] = newDescriptor
 	}
 }
@@ -229,24 +210,10 @@
 	logger.Debugf("loading domain: %s", root.Domain)
 	newDomain := &rateLimitDomain{rateLimitDescriptor{map[string]*rateLimitDescriptor{}, nil}}
-	newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.statsScope)
+	newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.manager)
 	this.domains[root.Domain] = newDomain
 }
 
-func (this *rateLimitConfigImpl) descriptorToKey(descriptor *pb_struct.RateLimitDescriptor) string {
-	rateLimitKey := ""
-	for _, entry := range descriptor.Entries {
-		if rateLimitKey != "" {
-			rateLimitKey += "."
-		}
-		rateLimitKey += entry.Key
-		if entry.Value != "" {
-			rateLimitKey += "_" + entry.Value
-		}
-	}
-	return rateLimitKey
-}
-
 func (this *rateLimitConfigImpl) Dump() string {
 	ret := ""
 	for _, domain := range this.domains {
@@ -268,13 +235,12 @@
 	}
 
 	if descriptor.GetLimit() != nil {
-		rateLimitKey := domain + "."
+ this.descriptorToKey(descriptor) + rateLimitKey := stats.DescriptorKey(domain, descriptor) rateLimitOverrideUnit := pb.RateLimitResponse_RateLimit_Unit(descriptor.GetLimit().GetUnit()) rateLimit = NewRateLimit( descriptor.GetLimit().GetRequestsPerUnit(), rateLimitOverrideUnit, - rateLimitKey, - this.statsScope) + this.manager.NewStats(rateLimitKey)) return rateLimit } @@ -316,9 +282,9 @@ func (this *rateLimitConfigImpl) GetLimit( // @param stats supplies the stats scope to use for limit stats during runtime. // @return a new config. func NewRateLimitConfigImpl( - configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig { + configs []RateLimitConfigToLoad, manager stats.Manager) RateLimitConfig { - ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsScope} + ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, manager} for _, config := range configs { ret.loadConfig(config) } @@ -329,9 +295,9 @@ func NewRateLimitConfigImpl( type rateLimitConfigLoaderImpl struct{} func (this *rateLimitConfigLoaderImpl) Load( - configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig { + configs []RateLimitConfigToLoad, manager stats.Manager) RateLimitConfig { - return NewRateLimitConfigImpl(configs, statsScope) + return NewRateLimitConfigImpl(configs, manager) } // @return a new default config loader implementation. diff --git a/src/config_check_cmd/main.go b/src/config_check_cmd/main.go index f9f3c742..d5751cc7 100644 --- a/src/config_check_cmd/main.go +++ b/src/config_check_cmd/main.go @@ -3,12 +3,14 @@ package main import ( "flag" "fmt" + "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/stats" "io/ioutil" "os" "path/filepath" "github.com/envoyproxy/ratelimit/src/config" - "github.com/lyft/gostats" + gostats "github.com/lyft/gostats" ) func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { @@ -19,9 +21,9 @@ func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { os.Exit(1) } }() - - dummyStats := stats.NewStore(stats.NewNullSink(), false) - config.NewRateLimitConfigImpl(allConfigs, dummyStats) + settingStruct := settings.NewSettings() + manager := stats.NewStatManager(gostats.NewStore(gostats.NewNullSink(), false), settingStruct) + config.NewRateLimitConfigImpl(allConfigs, manager) } func main() { diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index 44c2633e..b13cde15 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -5,6 +5,7 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/stats" "github.com/envoyproxy/ratelimit/src/utils" logger "github.com/sirupsen/logrus" "math" @@ -18,6 +19,7 @@ type BaseRateLimiter struct { cacheKeyGenerator CacheKeyGenerator localCache *freecache.Cache nearLimitRatio float32 + Manager stats.Manager } type LimitInfo struct { @@ -47,7 +49,7 @@ func (this *BaseRateLimiter) GenerateCacheKeys(request *pb.RateLimitRequest, cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey(request.Domain, request.Descriptors[i], limits[i], now) // Increase statistics for limits hit by their respective requests. 
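
For reference, an editor's sketch (not part of the diff; the domain and entry names are invented) of the key that GenerateCacheKeys hands to the manager in the hunk below. stats.DescriptorKey, defined in src/stats/manager_impl.go further down, joins each entry as "key_value" with "." separators and prefixes the domain; pb_struct is the same go-control-plane alias used throughout this diff:

	// Editor's sketch: the per-descriptor key used for detailed metrics.
	func exampleDescriptorKey(manager stats.Manager) {
		desc := &pb_struct.RateLimitDescriptor{
			Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "database", Value: "users"}},
		}
		key := stats.DescriptorKey("mongo_cps", desc) // "mongo_cps.database_users"
		manager.AddTotalHits(1, manager.NewStats(key), key)
	}
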
if limits[i] != nil {
-			limits[i].Stats.TotalHits.Add(uint64(hitsAddend))
+			this.Manager.AddTotalHits(uint64(hitsAddend), limits[i].Stats, stats.DescriptorKey(request.Domain, request.Descriptors[i]))
 		}
 	}
 	return cacheKeys
@@ -67,15 +69,15 @@ func (this *BaseRateLimiter) IsOverLimitWithLocalCache(key string) bool {
 
 // Generates response descriptor status based on cache key, over the limit with local cache, over the limit and
 // near the limit thresholds. Thresholds are checked in order and are mutually exclusive.
-func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *LimitInfo,
-	isOverLimitWithLocalCache bool, hitsAddend uint32) *pb.RateLimitResponse_DescriptorStatus {
-	if key == "" {
+func (this *BaseRateLimiter) GetResponseDescriptorStatus(localCacheKey string, limitInfo *LimitInfo,
+	isOverLimitWithLocalCache bool, hitsAddend uint32, descriptorKey string) *pb.RateLimitResponse_DescriptorStatus {
+	if localCacheKey == "" {
 		return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK, nil, 0)
 	}
 
 	if isOverLimitWithLocalCache {
-		limitInfo.limit.Stats.OverLimit.Add(uint64(hitsAddend))
-		limitInfo.limit.Stats.OverLimitWithLocalCache.Add(uint64(hitsAddend))
+		this.Manager.AddOverLimit(uint64(hitsAddend), limitInfo.limit.Stats, descriptorKey)
+		this.Manager.AddOverLimitWithLocalCache(uint64(hitsAddend), limitInfo.limit.Stats, descriptorKey)
 		return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT,
 			limitInfo.limit.Limit, 0)
 	}
@@ -84,12 +86,12 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *
 	// The nearLimitThreshold is the number of requests that can be made before hitting the nearLimitRatio.
 	// We need to know it in both the OK and OVER_LIMIT scenarios.
 	limitInfo.nearLimitThreshold = uint32(math.Floor(float64(float32(limitInfo.overLimitThreshold) * this.nearLimitRatio)))
-	logger.Debugf("cache key: %s current: %d", key, limitInfo.limitAfterIncrease)
+	logger.Debugf("cache key: %s current: %d", localCacheKey, limitInfo.limitAfterIncrease)
 	if limitInfo.limitAfterIncrease > limitInfo.overLimitThreshold {
 		responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT,
 			limitInfo.limit.Limit, 0)
 
-		checkOverLimitThreshold(limitInfo, hitsAddend)
+		this.checkOverLimitThreshold(limitInfo, hitsAddend, descriptorKey)
 
 		if this.localCache != nil {
 			// Set the TTL of the local_cache to be the entire duration.
@@ -99,9 +101,9 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *
 			// similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache key is mongo_1h, we start
 			// to get ratelimited in the 50th minute, the ttl of local_cache will be set as 1 hour(0h50m-1h49m).
 			// In the time of 1h1m, since the cache key becomes different (mongo_2h), it won't get ratelimited.
-			err := this.localCache.Set([]byte(key), []byte{}, int(utils.UnitToDivider(limitInfo.limit.Limit.Unit)))
+			err := this.localCache.Set([]byte(localCacheKey), []byte{}, int(utils.UnitToDivider(limitInfo.limit.Limit.Unit)))
 			if err != nil {
-				logger.Errorf("Failing to set local cache key: %s", key)
+				logger.Errorf("Failed to set local cache key: %s", localCacheKey)
 			}
 		}
 	} else {
@@ -109,14 +111,14 @@
 			limitInfo.limit.Limit, limitInfo.overLimitThreshold-limitInfo.limitAfterIncrease)
 
 		// The limit is OK but we additionally want to know if we are near the limit.
- checkNearLimitThreshold(limitInfo, hitsAddend) - limitInfo.limit.Stats.WithinLimit.Add(uint64(hitsAddend)) + this.checkNearLimitThreshold(limitInfo, hitsAddend, descriptorKey) + this.Manager.AddWithinLimit(uint64(hitsAddend), limitInfo.limit.Stats, descriptorKey) } return responseDescriptorStatus } func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, - localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) *BaseRateLimiter { + localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, manager stats.Manager) *BaseRateLimiter { return &BaseRateLimiter{ timeSource: timeSource, JitterRand: jitterRand, @@ -124,37 +126,41 @@ func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expira cacheKeyGenerator: NewCacheKeyGenerator(cacheKeyPrefix), localCache: localCache, nearLimitRatio: nearLimitRatio, + Manager: manager, } } -func checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) { +func (this *BaseRateLimiter) checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32, descriptorKey string) { // Increase over limit statistics. Because we support += behavior for increasing the limit, we need to // assess if the entire hitsAddend were over the limit. That is, if the limit's value before adding the // N hits was over the limit, then all the N hits were over limit. // Otherwise, only the difference between the current limit value and the over limit threshold // were over limit hits. if limitInfo.limitBeforeIncrease >= limitInfo.overLimitThreshold { - limitInfo.limit.Stats.OverLimit.Add(uint64(hitsAddend)) + this.Manager.AddOverLimit(uint64(hitsAddend), limitInfo.limit.Stats, descriptorKey) } else { - limitInfo.limit.Stats.OverLimit.Add(uint64(limitInfo.limitAfterIncrease - limitInfo.overLimitThreshold)) + this.Manager.AddOverLimit(uint64(limitInfo.limitAfterIncrease-limitInfo.overLimitThreshold), limitInfo.limit.Stats, descriptorKey) // If the limit before increase was below the over limit value, then some of the hits were // in the near limit range. - limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.overLimitThreshold - - utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease))) + this.Manager.AddNearLimit( + uint64(limitInfo.overLimitThreshold-utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)), + limitInfo.limit.Stats, + descriptorKey, + ) } } -func checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) { +func (this *BaseRateLimiter) checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32, descriptorKey string) { if limitInfo.limitAfterIncrease > limitInfo.nearLimitThreshold { // Here we also need to assess which portion of the hitsAddend were in the near limit range. // If all the hits were over the nearLimitThreshold, then all hits are near limit. Otherwise, // only the difference between the current limit value and the near limit threshold were near // limit hits. 
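		// Editor's worked example (not part of this diff): with requests_per_unit = 10
		// and nearLimitRatio = 0.8, nearLimitThreshold = floor(10 * 0.8) = 8. A
		// hitsAddend of 3 arriving at a counter of 6 raises it to 9: the second branch
		// below books 9 - 8 = 1 near_limit hit, since only the last of the three hits
		// crossed the threshold. Had the counter already been at 8 or above, the first
		// branch would book all 3 hits as near_limit.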
if limitInfo.limitBeforeIncrease >= limitInfo.nearLimitThreshold { - limitInfo.limit.Stats.NearLimit.Add(uint64(hitsAddend)) + this.Manager.AddNearLimit(uint64(hitsAddend), limitInfo.limit.Stats, descriptorKey) } else { - limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.limitAfterIncrease - limitInfo.nearLimitThreshold)) + this.Manager.AddNearLimit(uint64(limitInfo.limitAfterIncrease-limitInfo.nearLimitThreshold), limitInfo.limit.Stats, descriptorKey) } } } diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 892565e8..2d3935d5 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -17,12 +17,13 @@ package memcached import ( "context" + "github.com/envoyproxy/ratelimit/src/stats" "math/rand" "strconv" "sync" "github.com/coocood/freecache" - stats "github.com/lyft/gostats" + gostats "github.com/lyft/gostats" "github.com/bradfitz/gomemcache/memcache" @@ -117,7 +118,7 @@ func (this *rateLimitMemcacheImpl) DoLimit( limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) responseDescriptorStatuses[i] = this.baseRateLimiter.GetResponseDescriptorStatus(cacheKey.Key, - limitInfo, isOverLimitWithLocalCache[i], hitsAddend) + limitInfo, isOverLimitWithLocalCache[i], hitsAddend, stats.DescriptorKey(request.Domain, request.Descriptors[i])) } this.waitGroup.Add(1) @@ -174,7 +175,7 @@ func (this *rateLimitMemcacheImpl) Flush() { } func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand, - expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { + expirationJitterMaxSeconds int64, localCache *freecache.Cache, manager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { return &rateLimitMemcacheImpl{ client: client, timeSource: timeSource, @@ -182,19 +183,19 @@ func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRan expirationJitterMaxSeconds: expirationJitterMaxSeconds, localCache: localCache, nearLimitRatio: nearLimitRatio, - baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix), + baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, manager), } } func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand, - localCache *freecache.Cache, scope stats.Scope) limiter.RateLimitCache { + localCache *freecache.Cache, scope gostats.Scope, manager stats.Manager) limiter.RateLimitCache { return NewRateLimitCacheImpl( CollectStats(memcache.New(s.MemcacheHostPort), scope.Scope("memcache")), timeSource, jitterRand, s.ExpirationJitterMaxSeconds, localCache, - scope, + manager, s.NearLimitRatio, s.CacheKeyPrefix, ) diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 7e619b66..a6ddfe85 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -1,6 +1,7 @@ package redis import ( + "github.com/envoyproxy/ratelimit/src/stats" "math/rand" "github.com/coocood/freecache" @@ -10,7 +11,7 @@ import ( "github.com/envoyproxy/ratelimit/src/utils" ) -func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) limiter.RateLimitCache { +func 
NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, manager stats.Manager) limiter.RateLimitCache { var perSecondPool Client if s.RedisPerSecond { perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, @@ -29,5 +30,6 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca localCache, s.NearLimitRatio, s.CacheKeyPrefix, + manager, ) } diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index b2b3d3d2..625db829 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -1,6 +1,7 @@ package redis import ( + "github.com/envoyproxy/ratelimit/src/stats" "math/rand" "github.com/coocood/freecache" @@ -96,7 +97,7 @@ func (this *fixedRateLimitCacheImpl) DoLimit( limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) responseDescriptorStatuses[i] = this.baseRateLimiter.GetResponseDescriptorStatus(cacheKey.Key, - limitInfo, isOverLimitWithLocalCache[i], hitsAddend) + limitInfo, isOverLimitWithLocalCache[i], hitsAddend, stats.DescriptorKey(request.Domain, request.Descriptors[i])) } @@ -107,10 +108,10 @@ func (this *fixedRateLimitCacheImpl) DoLimit( func (this *fixedRateLimitCacheImpl) Flush() {} func NewFixedRateLimitCacheImpl(client Client, perSecondClient Client, timeSource utils.TimeSource, - jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { + jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, manager stats.Manager) limiter.RateLimitCache { return &fixedRateLimitCacheImpl{ client: client, perSecondClient: perSecondClient, - baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix), + baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, manager), } } diff --git a/src/server/server_impl.go b/src/server/server_impl.go index b60d1e32..ba3abdfc 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -4,6 +4,7 @@ import ( "bytes" "expvar" "fmt" + "github.com/envoyproxy/ratelimit/src/stats" "io" "net/http" "net/http/pprof" @@ -25,7 +26,7 @@ import ( "github.com/gorilla/mux" reuseport "github.com/kavu/go_reuseport" "github.com/lyft/goruntime/loader" - stats "github.com/lyft/gostats" + gostats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/health" @@ -44,8 +45,8 @@ type server struct { debugPort int router *mux.Router grpcServer *grpc.Server - store stats.Store - scope stats.Scope + store gostats.Store + scope gostats.Scope runtime loader.IFace debugListener serverDebugListener httpServer *http.Server @@ -160,7 +161,7 @@ func (server *server) startGrpc() { server.grpcServer.Serve(lis) } -func (server *server) Scope() stats.Scope { +func (server *server) Scope() gostats.Scope { return server.scope } @@ -168,11 +169,11 @@ func (server *server) Runtime() loader.IFace { return server.runtime } -func NewServer(s settings.Settings, name string, store stats.Store, localCache *freecache.Cache, opts ...settings.Option) Server { - return newServer(s, name, store, localCache, 
opts...) +func NewServer(s settings.Settings, name string, manager stats.Manager, localCache *freecache.Cache, opts ...settings.Option) Server { + return newServer(s, name, manager, localCache, opts...) } -func newServer(s settings.Settings, name string, store stats.Store, localCache *freecache.Cache, opts ...settings.Option) *server { +func newServer(s settings.Settings, name string, manager stats.Manager, localCache *freecache.Cache, opts ...settings.Option) *server { for _, opt := range opts { opt(&s) } @@ -186,9 +187,9 @@ func newServer(s settings.Settings, name string, store stats.Store, localCache * ret.debugPort = s.DebugPort // setup stats - ret.store = store + ret.store = manager.GetStatsStore() ret.scope = ret.store.ScopeWithTags(name, s.ExtraTags) - ret.store.AddStatGenerator(stats.NewRuntimeStats(ret.scope.Scope("go"))) + ret.store.AddStatGenerator(gostats.NewRuntimeStats(ret.scope.Scope("go"))) if localCache != nil { ret.store.AddStatGenerator(limiter.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) } diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 126bb776..00d421af 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -2,6 +2,7 @@ package ratelimit import ( "fmt" + "github.com/envoyproxy/ratelimit/src/stats" "strings" "sync" @@ -11,37 +12,10 @@ import ( "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" "github.com/lyft/goruntime/loader" - stats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" ) -type shouldRateLimitStats struct { - redisError stats.Counter - serviceError stats.Counter -} - -func newShouldRateLimitStats(scope stats.Scope) shouldRateLimitStats { - ret := shouldRateLimitStats{} - ret.redisError = scope.NewCounter("redis_error") - ret.serviceError = scope.NewCounter("service_error") - return ret -} - -type serviceStats struct { - configLoadSuccess stats.Counter - configLoadError stats.Counter - shouldRateLimit shouldRateLimitStats -} - -func newServiceStats(scope stats.Scope) serviceStats { - ret := serviceStats{} - ret.configLoadSuccess = scope.NewCounter("config_load_success") - ret.configLoadError = scope.NewCounter("config_load_error") - ret.shouldRateLimit = newShouldRateLimitStats(scope.Scope("call.should_rate_limit")) - return ret -} - type RateLimitServiceServer interface { pb.RateLimitServiceServer GetCurrentConfig() config.RateLimitConfig @@ -55,13 +29,12 @@ type service struct { config config.RateLimitConfig runtimeUpdateEvent chan int cache limiter.RateLimitCache - stats serviceStats - rlStatsScope stats.Scope + stats stats.ServiceStats legacy *legacyService runtimeWatchRoot bool } -func (this *service) reloadConfig() { +func (this *service) reloadConfig(manager stats.Manager) { defer func() { if e := recover(); e != nil { configError, ok := e.(config.RateLimitConfigError) @@ -69,7 +42,7 @@ func (this *service) reloadConfig() { panic(e) } - this.stats.configLoadError.Inc() + this.stats.ConfigLoadError.Inc() logger.Errorf("error loading new configuration from runtime: %s", configError.Error()) } }() @@ -84,8 +57,8 @@ func (this *service) reloadConfig() { files = append(files, config.RateLimitConfigToLoad{key, snapshot.Get(key)}) } - newConfig := this.configLoader.Load(files, this.rlStatsScope) - this.stats.configLoadSuccess.Inc() + newConfig := this.configLoader.Load(files, manager) + this.stats.ConfigLoadSuccess.Inc() this.configLock.Lock() this.config = newConfig this.configLock.Unlock() @@ -170,12 +143,12 @@ func 
(this *service) ShouldRateLimit( switch t := err.(type) { case redis.RedisError: { - this.stats.shouldRateLimit.redisError.Inc() + this.stats.ShouldRateLimit.RedisError.Inc() finalError = t } case serviceError: { - this.stats.shouldRateLimit.serviceError.Inc() + this.stats.ShouldRateLimit.ServiceError.Inc() finalError = t } default: @@ -199,7 +172,7 @@ func (this *service) GetCurrentConfig() config.RateLimitConfig { } func NewService(runtime loader.IFace, cache limiter.RateLimitCache, - configLoader config.RateLimitConfigLoader, stats stats.Scope, runtimeWatchRoot bool) RateLimitServiceServer { + configLoader config.RateLimitConfigLoader, manager stats.Manager, runtimeWatchRoot bool) RateLimitServiceServer { newService := &service{ runtime: runtime, @@ -208,25 +181,24 @@ func NewService(runtime loader.IFace, cache limiter.RateLimitCache, config: nil, runtimeUpdateEvent: make(chan int), cache: cache, - stats: newServiceStats(stats), - rlStatsScope: stats.Scope("rate_limit"), + stats: manager.NewServiceStats(), runtimeWatchRoot: runtimeWatchRoot, } newService.legacy = &legacyService{ s: newService, - shouldRateLimitLegacyStats: newShouldRateLimitLegacyStats(stats), + shouldRateLimitLegacyStats: manager.NewShouldRateLimitLegacyStats(), } runtime.AddUpdateCallback(newService.runtimeUpdateEvent) - newService.reloadConfig() + newService.reloadConfig(manager) go func() { // No exit right now. for { logger.Debugf("waiting for runtime update") <-newService.runtimeUpdateEvent logger.Debugf("got runtime update and reloading config") - newService.reloadConfig() + newService.reloadConfig(manager) } }() diff --git a/src/service/ratelimit_legacy.go b/src/service/ratelimit_legacy.go index 17112675..ac3971e0 100644 --- a/src/service/ratelimit_legacy.go +++ b/src/service/ratelimit_legacy.go @@ -5,7 +5,7 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/lyft/gostats" + "github.com/envoyproxy/ratelimit/src/stats" "golang.org/x/net/context" ) @@ -17,22 +17,7 @@ type RateLimitLegacyServiceServer interface { // the legacyService receives RateLimitRequests, converts the request, and calls the service's ShouldRateLimit method. 
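
An editor's sketch of the counters a service now receives from the manager, per the ratelimit.go hunks above (metric names follow the scopes set up by NewStatManager in src/stats/manager_impl.go further down):

	// ServiceStats replaces the former package-private serviceStats.
	func exampleServiceStats(manager stats.Manager) {
		svcStats := manager.NewServiceStats()
		svcStats.ConfigLoadSuccess.Inc()          // ratelimit.service.config_load_success
		svcStats.ConfigLoadError.Inc()            // ratelimit.service.config_load_error
		svcStats.ShouldRateLimit.RedisError.Inc() // ratelimit.service.call.should_rate_limit.redis_error
	}
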
type legacyService struct { s *service - shouldRateLimitLegacyStats shouldRateLimitLegacyStats -} - -type shouldRateLimitLegacyStats struct { - reqConversionError stats.Counter - respConversionError stats.Counter - shouldRateLimitError stats.Counter -} - -func newShouldRateLimitLegacyStats(scope stats.Scope) shouldRateLimitLegacyStats { - s := scope.Scope("call.should_rate_limit_legacy") - return shouldRateLimitLegacyStats{ - reqConversionError: s.NewCounter("req_conversion_error"), - respConversionError: s.NewCounter("resp_conversion_error"), - shouldRateLimitError: s.NewCounter("should_rate_limit_error"), - } + shouldRateLimitLegacyStats stats.ShouldRateLimitLegacyStats } func (this *legacyService) ShouldRateLimit( @@ -41,18 +26,18 @@ func (this *legacyService) ShouldRateLimit( request, err := ConvertLegacyRequest(legacyRequest) if err != nil { - this.shouldRateLimitLegacyStats.reqConversionError.Inc() + this.shouldRateLimitLegacyStats.ReqConversionError.Inc() return nil, err } resp, err := this.s.ShouldRateLimit(ctx, request) if err != nil { - this.shouldRateLimitLegacyStats.shouldRateLimitError.Inc() + this.shouldRateLimitLegacyStats.ShouldRateLimitError.Inc() return nil, err } legacyResponse, err := ConvertResponse(resp) if err != nil { - this.shouldRateLimitLegacyStats.respConversionError.Inc() + this.shouldRateLimitLegacyStats.RespConversionError.Inc() return nil, err } diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index afa1b144..e45ec392 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -1,6 +1,7 @@ package runner import ( + "github.com/envoyproxy/ratelimit/src/stats" "io" "math/rand" "net/http" @@ -8,7 +9,7 @@ import ( "sync" "time" - stats "github.com/lyft/gostats" + gostats "github.com/lyft/gostats" "github.com/coocood/freecache" @@ -27,24 +28,24 @@ import ( ) type Runner struct { - statsStore stats.Store - settings settings.Settings - srv server.Server - mu sync.Mutex + manager stats.Manager + settings settings.Settings + srv server.Server + mu sync.Mutex } func NewRunner(s settings.Settings) Runner { return Runner{ - statsStore: stats.NewDefaultStore(), - settings: s, + manager: stats.NewStatManager(gostats.NewDefaultStore(), s), + settings: s, } } -func (runner *Runner) GetStatsStore() stats.Store { - return runner.statsStore +func (runner *Runner) GetStatsStore() gostats.Store { + return runner.manager.GetStatsStore() } -func createLimiter(srv server.Server, s settings.Settings, localCache *freecache.Cache) limiter.RateLimitCache { +func createLimiter(srv server.Server, s settings.Settings, localCache *freecache.Cache, manager stats.Manager) limiter.RateLimitCache { switch s.BackendType { case "redis", "": return redis.NewRateLimiterCacheImplFromSettings( @@ -53,14 +54,16 @@ func createLimiter(srv server.Server, s settings.Settings, localCache *freecache srv, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), - s.ExpirationJitterMaxSeconds) + s.ExpirationJitterMaxSeconds, + manager) case "memcache": return memcached.NewRateLimitCacheImplFromSettings( s, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), localCache, - srv.Scope()) + srv.Scope(), + manager) default: logger.Fatalf("Invalid setting for BackendType: %s", s.BackendType) panic("This line should not be reachable") @@ -91,16 +94,16 @@ func (runner *Runner) Run() { localCache = freecache.NewCache(s.LocalCacheSizeInBytes) } - srv := server.NewServer(s, "ratelimit", runner.statsStore, 
localCache, settings.GrpcUnaryInterceptor(nil)) + srv := server.NewServer(s, "ratelimit", runner.manager, localCache, settings.GrpcUnaryInterceptor(nil)) runner.mu.Lock() runner.srv = srv runner.mu.Unlock() service := ratelimit.NewService( srv.Runtime(), - createLimiter(srv, s, localCache), + createLimiter(srv, s, localCache, runner.manager), config.NewRateLimitConfigLoaderImpl(), - srv.Scope().Scope("service"), + runner.manager, s.RuntimeWatchRoot, ) diff --git a/src/settings/settings.go b/src/settings/settings.go index 8468076e..40c755bd 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -59,6 +59,9 @@ type Settings struct { // Memcache settings MemcacheHostPort string `envconfig:"MEMCACHE_HOST_PORT" default:""` + + //Detailed Metrics Mode + DetailedMetrics bool `envconfig:"DETAILED_METRICS_MODE" default:"false"` } type Option func(*Settings) diff --git a/src/stats/manager.go b/src/stats/manager.go new file mode 100644 index 00000000..5b655897 --- /dev/null +++ b/src/stats/manager.go @@ -0,0 +1,17 @@ +package stats + +import stats "github.com/lyft/gostats" + +type Manager interface { + AddTotalHits(u uint64, rlStats RateLimitStats, key string) + AddOverLimit(u uint64, rlStats RateLimitStats, key string) + AddNearLimit(u uint64, rlStats RateLimitStats, key string) + AddOverLimitWithLocalCache(u uint64, rlStats RateLimitStats, key string) + AddWithinLimit(u uint64, rlStats RateLimitStats, key string) + NewStats(key string) RateLimitStats + NewShouldRateLimitStats() ShouldRateLimitStats + NewServiceStats() ServiceStats + NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats + GetStatsStore() stats.Store + NewDetailedStats(key string) RateLimitStats +} diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go new file mode 100644 index 00000000..a4109a80 --- /dev/null +++ b/src/stats/manager_impl.go @@ -0,0 +1,180 @@ +package stats + +import ( + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/settings" + gostats "github.com/lyft/gostats" + logger "github.com/sirupsen/logrus" +) + +func NewStatManager(store gostats.Store, s settings.Settings) *ManagerImpl { + logger.Infof("Initializing Stat Manager with detailed metrics %t", s.DetailedMetrics) + serviceScope := store.ScopeWithTags("ratelimit", s.ExtraTags).Scope("service") + return &ManagerImpl{ + store: store, + rlStatsScope: serviceScope.Scope("rate_limit"), + legacyStatsScope: serviceScope.Scope("call.should_rate_limit_legacy"), + serviceStatsScope: serviceScope, + detailedMetricsScope: serviceScope.Scope("rate_limit").Scope("detailed"), + detailed: s.DetailedMetrics, + } +} + +type ManagerImpl struct { + store gostats.Store + rlStatsScope gostats.Scope + legacyStatsScope gostats.Scope + serviceStatsScope gostats.Scope + detailedMetricsScope gostats.Scope + detailed bool +} + +func (this *ManagerImpl) GetStatsStore() gostats.Store { + return this.store +} + +func (this *ManagerImpl) AddTotalHits(u uint64, rlStats RateLimitStats, key string) { + rlStats.TotalHits.Add(u) + if this.detailed { + stat := this.getDescriptorStat(key) + stat.TotalHits.Add(u) + } +} + +func (this *ManagerImpl) AddOverLimit(u uint64, rlStats RateLimitStats, key string) { + rlStats.OverLimit.Add(u) + if this.detailed { + stat := this.getDescriptorStat(key) + stat.OverLimit.Add(u) + } +} + +func (this *ManagerImpl) AddNearLimit(u uint64, rlStats RateLimitStats, key string) { + rlStats.NearLimit.Add(u) + if this.detailed { + stat := 
this.getDescriptorStat(key)
+		stat.NearLimit.Add(u)
+	}
+}
+
+func (this *ManagerImpl) AddOverLimitWithLocalCache(u uint64, rlStats RateLimitStats, key string) {
+	rlStats.OverLimitWithLocalCache.Add(u)
+	if this.detailed {
+		stat := this.getDescriptorStat(key)
+		stat.OverLimitWithLocalCache.Add(u)
+	}
+}
+
+func (this *ManagerImpl) AddWithinLimit(u uint64, rlStats RateLimitStats, key string) {
+	rlStats.WithinLimit.Add(u)
+	if this.detailed {
+		stat := this.getDescriptorStat(key)
+		stat.WithinLimit.Add(u)
+	}
+}
+
+// todo: consider adding a RateLimitStats cache
+// todo: consider adding a descriptor fields parameter to allow configuring which descriptor entries emit metrics.
+func (this *ManagerImpl) getDescriptorStat(key string) RateLimitStats {
+	ret := this.NewDetailedStats(key)
+	return ret
+}
+
+// Create new rate descriptor stats for a descriptor tuple.
+// @param key supplies the fully resolved descriptor tuple.
+// @return new stats.
+func (this *ManagerImpl) NewStats(key string) RateLimitStats {
+	ret := RateLimitStats{}
+	logger.Debugf("Creating stats for key: '%s'", key)
+	ret.Key = key
+	ret.TotalHits = this.rlStatsScope.NewCounter(key + ".total_hits")
+	ret.OverLimit = this.rlStatsScope.NewCounter(key + ".over_limit")
+	ret.NearLimit = this.rlStatsScope.NewCounter(key + ".near_limit")
+	ret.OverLimitWithLocalCache = this.rlStatsScope.NewCounter(key + ".over_limit_with_local_cache")
+	ret.WithinLimit = this.rlStatsScope.NewCounter(key + ".within_limit")
+	return ret
+}
+
+func (this *ManagerImpl) NewDetailedStats(key string) RateLimitStats {
+	ret := RateLimitStats{}
+	logger.Debugf("Creating detailed stats for key: '%s'", key)
+	ret.Key = key
+	ret.TotalHits = this.detailedMetricsScope.NewCounter(key + ".total_hits")
+	ret.OverLimit = this.detailedMetricsScope.NewCounter(key + ".over_limit")
+	ret.NearLimit = this.detailedMetricsScope.NewCounter(key + ".near_limit")
+	ret.OverLimitWithLocalCache = this.detailedMetricsScope.NewCounter(key + ".over_limit_with_local_cache")
+	ret.WithinLimit = this.detailedMetricsScope.NewCounter(key + ".within_limit")
+	return ret
+}
+
+type ShouldRateLimitLegacyStats struct {
+	ReqConversionError   gostats.Counter
+	RespConversionError  gostats.Counter
+	ShouldRateLimitError gostats.Counter
+}
+
+func (this *ManagerImpl) NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats {
+	return ShouldRateLimitLegacyStats{
+		ReqConversionError:   this.legacyStatsScope.NewCounter("req_conversion_error"),
+		RespConversionError:  this.legacyStatsScope.NewCounter("resp_conversion_error"),
+		ShouldRateLimitError: this.legacyStatsScope.NewCounter("should_rate_limit_error"),
+	}
+}
+
+type ShouldRateLimitStats struct {
+	RedisError   gostats.Counter
+	ServiceError gostats.Counter
+}
+
+func (this *ManagerImpl) NewShouldRateLimitStats() ShouldRateLimitStats {
+	s := this.serviceStatsScope.Scope("call.should_rate_limit")
+	ret := ShouldRateLimitStats{}
+	ret.RedisError = s.NewCounter("redis_error")
+	ret.ServiceError = s.NewCounter("service_error")
+	return ret
+}
+
+type ServiceStats struct {
+	ConfigLoadSuccess gostats.Counter
+	ConfigLoadError   gostats.Counter
+	ShouldRateLimit   ShouldRateLimitStats
+}
+
+func (this *ManagerImpl) NewServiceStats() ServiceStats {
+	ret := ServiceStats{}
+	ret.ConfigLoadSuccess = this.serviceStatsScope.NewCounter("config_load_success")
+	ret.ConfigLoadError = this.serviceStatsScope.NewCounter("config_load_error")
+	ret.ShouldRateLimit = this.NewShouldRateLimitStats()
+	return ret
+}
+
+func DescriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) string {
+	rateLimitKey := ""
+
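	// Build one "entryKey_entryValue" segment per entry, joined with ".", then prefix the domain.
+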
for _, entry := range descriptor.Entries {
+		if rateLimitKey != "" {
+			rateLimitKey += "."
+		}
+		rateLimitKey += entry.Key
+		if entry.Value != "" {
+			rateLimitKey += "_" + entry.Value
+		}
+	}
+	return domain + "." + rateLimitKey
+}
+
+// Stats for an individual rate limit config entry.
+// todo: Ideally the gostats counter fields would be unexported and read through
+// getters such as rlStats.TotalHits() uint64, so that mutators such as Inc()
+// and Add() could only be reached through ManagerImpl.
+type RateLimitStats struct {
+	Key                     string
+	TotalHits               gostats.Counter
+	OverLimit               gostats.Counter
+	NearLimit               gostats.Counter
+	OverLimitWithLocalCache gostats.Counter
+	WithinLimit             gostats.Counter
+}
+
+func (this RateLimitStats) String() string {
+	return this.Key
+}
diff --git a/test/config/config_test.go b/test/config/config_test.go
index 4a244bce..e0f2a2e0 100644
--- a/test/config/config_test.go
+++ b/test/config/config_test.go
@@ -9,6 +9,7 @@ import (
 	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
 	pb_type "github.com/envoyproxy/go-control-plane/envoy/type/v3"
 	"github.com/envoyproxy/ratelimit/src/config"
+	mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats"
 	"github.com/lyft/gostats"
 	"github.com/stretchr/testify/assert"
 )
@@ -23,8 +24,8 @@ func loadFile(path string) []config.RateLimitConfigToLoad {
 
 func TestBasicConfig(t *testing.T) {
 	assert := assert.New(t)
-	stats := stats.NewStore(stats.NewNullSink(), false)
-	rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), stats)
+	newStore := stats.NewStore(stats.NewNullSink(), false)
+	rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(newStore))
 	rlConfig.Dump()
 	assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{}))
 	assert.Nil(rlConfig.GetLimit(nil, "test-domain", &pb_struct.RateLimitDescriptor{}))
@@ -68,10 +69,10 @@
 	rl.Stats.WithinLimit.Inc()
 	assert.EqualValues(5, rl.Limit.RequestsPerUnit)
 	assert.Equal(pb.RateLimitResponse_RateLimit_SECOND, rl.Limit.Unit)
-	assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1.total_hits").Value())
-	assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1.over_limit").Value())
-	assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1.near_limit").Value())
-	assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1.within_limit").Value())
+	assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1.total_hits").Value())
+	assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1.over_limit").Value())
+	assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1.near_limit").Value())
+	assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1.within_limit").Value())
 	rl = rlConfig.GetLimit(
 		nil, "test-domain",
@@ -85,13 +86,13 @@
 	assert.EqualValues(10, rl.Limit.RequestsPerUnit)
 	assert.Equal(pb.RateLimitResponse_RateLimit_SECOND, rl.Limit.Unit)
 	assert.EqualValues(
-		1, stats.NewCounter("test-domain.key1_value1.subkey1_subvalue1.total_hits").Value())
+		1, newStore.NewCounter("test-domain.key1_value1.subkey1_subvalue1.total_hits").Value())
 	assert.EqualValues(
-		1, stats.NewCounter("test-domain.key1_value1.subkey1_subvalue1.over_limit").Value())
+		1, newStore.NewCounter("test-domain.key1_value1.subkey1_subvalue1.over_limit").Value())
 	assert.EqualValues(
-		1,
stats.NewCounter("test-domain.key1_value1.subkey1_subvalue1.near_limit").Value()) + 1, newStore.NewCounter("test-domain.key1_value1.subkey1_subvalue1.near_limit").Value()) assert.EqualValues( - 1, stats.NewCounter("test-domain.key1_value1.subkey1_subvalue1.within_limit").Value()) + 1, newStore.NewCounter("test-domain.key1_value1.subkey1_subvalue1.within_limit").Value()) rl = rlConfig.GetLimit( nil, "test-domain", @@ -104,10 +105,10 @@ func TestBasicConfig(t *testing.T) { rl.Stats.WithinLimit.Inc() assert.EqualValues(20, rl.Limit.RequestsPerUnit) assert.Equal(pb.RateLimitResponse_RateLimit_MINUTE, rl.Limit.Unit) - assert.EqualValues(1, stats.NewCounter("test-domain.key2.total_hits").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key2.over_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key2.near_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key2.within_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key2.total_hits").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key2.over_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key2.near_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key2.within_limit").Value()) rl = rlConfig.GetLimit( nil, "test-domain", @@ -120,10 +121,10 @@ func TestBasicConfig(t *testing.T) { rl.Stats.WithinLimit.Inc() assert.EqualValues(30, rl.Limit.RequestsPerUnit) assert.Equal(pb.RateLimitResponse_RateLimit_MINUTE, rl.Limit.Unit) - assert.EqualValues(1, stats.NewCounter("test-domain.key2_value2.total_hits").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key2_value2.over_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key2_value2.near_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key2_value2.within_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key2_value2.total_hits").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key2_value2.over_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key2_value2.near_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key2_value2.within_limit").Value()) rl = rlConfig.GetLimit( nil, "test-domain", @@ -143,10 +144,10 @@ func TestBasicConfig(t *testing.T) { rl.Stats.WithinLimit.Inc() assert.EqualValues(1, rl.Limit.RequestsPerUnit) assert.Equal(pb.RateLimitResponse_RateLimit_HOUR, rl.Limit.Unit) - assert.EqualValues(1, stats.NewCounter("test-domain.key3.total_hits").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key3.over_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key3.near_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key3.within_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key3.total_hits").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key3.over_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key3.near_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key3.within_limit").Value()) rl = rlConfig.GetLimit( nil, "test-domain", @@ -159,16 +160,16 @@ func TestBasicConfig(t *testing.T) { rl.Stats.WithinLimit.Inc() assert.EqualValues(1, rl.Limit.RequestsPerUnit) assert.Equal(pb.RateLimitResponse_RateLimit_DAY, rl.Limit.Unit) - assert.EqualValues(1, stats.NewCounter("test-domain.key4.total_hits").Value()) - assert.EqualValues(1, 
stats.NewCounter("test-domain.key4.over_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key4.near_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key4.within_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key4.total_hits").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key4.over_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key4.near_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key4.within_limit").Value()) } func TestConfigLimitOverride(t *testing.T) { assert := assert.New(t) - stats := stats.NewStore(stats.NewNullSink(), false) - rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), stats) + newStore := stats.NewStore(stats.NewNullSink(), false) + rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(newStore)) rlConfig.Dump() // No matching domain assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{ @@ -193,10 +194,10 @@ func TestConfigLimitOverride(t *testing.T) { rl.Stats.OverLimit.Inc() rl.Stats.NearLimit.Inc() rl.Stats.WithinLimit.Inc() - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something.total_hits").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something.over_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something.near_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something.within_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1_something.total_hits").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1_something.over_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1_something.near_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1_something.within_limit").Value()) // Change in override value doesn't erase stats rl = rlConfig.GetLimit( @@ -216,10 +217,10 @@ func TestConfigLimitOverride(t *testing.T) { RequestsPerUnit: 42, Unit: pb.RateLimitResponse_RateLimit_HOUR, }, rl.Limit) - assert.EqualValues(2, stats.NewCounter("test-domain.key1_value1.subkey1_something.total_hits").Value()) - assert.EqualValues(2, stats.NewCounter("test-domain.key1_value1.subkey1_something.over_limit").Value()) - assert.EqualValues(2, stats.NewCounter("test-domain.key1_value1.subkey1_something.near_limit").Value()) - assert.EqualValues(2, stats.NewCounter("test-domain.key1_value1.subkey1_something.within_limit").Value()) + assert.EqualValues(2, newStore.NewCounter("test-domain.key1_value1.subkey1_something.total_hits").Value()) + assert.EqualValues(2, newStore.NewCounter("test-domain.key1_value1.subkey1_something.over_limit").Value()) + assert.EqualValues(2, newStore.NewCounter("test-domain.key1_value1.subkey1_something.near_limit").Value()) + assert.EqualValues(2, newStore.NewCounter("test-domain.key1_value1.subkey1_something.within_limit").Value()) // Different value creates a different counter rl = rlConfig.GetLimit( @@ -239,10 +240,10 @@ func TestConfigLimitOverride(t *testing.T) { rl.Stats.OverLimit.Inc() rl.Stats.NearLimit.Inc() rl.Stats.WithinLimit.Inc() - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something_else.total_hits").Value()) - assert.EqualValues(1, 
stats.NewCounter("test-domain.key1_value1.subkey1_something_else.over_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something_else.near_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something_else.within_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1_something_else.total_hits").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1_something_else.over_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1_something_else.near_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1_something_else.within_limit").Value()) } func expectConfigPanic(t *testing.T, call func(), expectedError string) { @@ -261,7 +262,7 @@ func TestEmptyDomain(t *testing.T) { t, func() { config.NewRateLimitConfigImpl( - loadFile("empty_domain.yaml"), stats.NewStore(stats.NewNullSink(), false)) + loadFile("empty_domain.yaml"), mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "empty_domain.yaml: config file cannot have empty domain") } @@ -272,7 +273,7 @@ func TestDuplicateDomain(t *testing.T) { func() { files := loadFile("basic_config.yaml") files = append(files, loadFile("duplicate_domain.yaml")...) - config.NewRateLimitConfigImpl(files, stats.NewStore(stats.NewNullSink(), false)) + config.NewRateLimitConfigImpl(files, mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "duplicate_domain.yaml: duplicate domain 'test-domain' in config file") } @@ -283,7 +284,7 @@ func TestEmptyKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("empty_key.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "empty_key.yaml: descriptor has empty key") } @@ -294,7 +295,7 @@ func TestDuplicateKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("duplicate_key.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "duplicate_key.yaml: duplicate descriptor composite key 'test-domain.key1_value1'") } @@ -305,7 +306,7 @@ func TestBadLimitUnit(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("bad_limit_unit.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "bad_limit_unit.yaml: invalid rate limit unit 'foo'") } @@ -316,7 +317,7 @@ func TestBadYaml(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("bad_yaml.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "bad_yaml.yaml: error loading config file: yaml: line 2: found unexpected end of stream") } @@ -327,7 +328,7 @@ func TestMisspelledKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("misspelled_key.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "misspelled_key.yaml: config error, unknown key 'ratelimit'") @@ -336,7 +337,8 @@ func TestMisspelledKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("misspelled_key2.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + }, "misspelled_key2.yaml: config error, unknown key 
'requestsperunit'") } @@ -347,7 +349,7 @@ func TestNonStringKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("non_string_key.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "non_string_key.yaml: config error, key is not of type string: 0.25") } @@ -358,7 +360,7 @@ func TestNonMapList(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("non_map_list.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "non_map_list.yaml: config error, yaml file contains list of type other than map: a") } diff --git a/test/limiter/base_limiter_test.go b/test/limiter/base_limiter_test.go index 41aa0e5b..1f5bcc31 100644 --- a/test/limiter/base_limiter_test.go +++ b/test/limiter/base_limiter_test.go @@ -1,6 +1,7 @@ package limiter import ( + stats2 "github.com/envoyproxy/ratelimit/test/mocks/stats" "math/rand" "testing" @@ -22,10 +23,11 @@ func TestGenerateCacheKeys(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) jitterSource := mock_utils.NewMockJitterRandSource(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) + sm := stats2.NewMockStatManager(statsStore) timeSource.EXPECT().UnixNow().Return(int64(1234)) - baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "") + baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) assert.Equal(1, len(cacheKeys)) @@ -40,10 +42,11 @@ func TestGenerateCacheKeysPrefix(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) jitterSource := mock_utils.NewMockJitterRandSource(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) + sm := stats2.NewMockStatManager(statsStore) timeSource.EXPECT().UnixNow().Return(int64(1234)) - baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:") + baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) assert.Equal(1, len(cacheKeys)) @@ -57,7 +60,8 @@ func TestOverLimitWithLocalCache(t *testing.T) { defer controller.Finish() localCache := freecache.NewCache(100) localCache.Set([]byte("key"), []byte("value"), 100) - baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "") + sm := stats2.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)) + baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "", sm) // Returns true, as local cache contains over limit 
diff --git a/test/limiter/base_limiter_test.go b/test/limiter/base_limiter_test.go
index 41aa0e5b..1f5bcc31 100644
--- a/test/limiter/base_limiter_test.go
+++ b/test/limiter/base_limiter_test.go
@@ -1,6 +1,7 @@
 package limiter
 
 import (
+	stats2 "github.com/envoyproxy/ratelimit/test/mocks/stats"
 	"math/rand"
 	"testing"
 
@@ -22,10 +23,11 @@ func TestGenerateCacheKeys(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	jitterSource := mock_utils.NewMockJitterRandSource(controller)
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
+	sm := stats2.NewMockStatManager(statsStore)
 	timeSource.EXPECT().UnixNow().Return(int64(1234))
-	baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "")
+	baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm)
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 	assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value())
 	cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1)
 	assert.Equal(1, len(cacheKeys))
@@ -40,10 +42,11 @@ func TestGenerateCacheKeysPrefix(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	jitterSource := mock_utils.NewMockJitterRandSource(controller)
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
+	sm := stats2.NewMockStatManager(statsStore)
 	timeSource.EXPECT().UnixNow().Return(int64(1234))
-	baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:")
+	baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:", sm)
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 	assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value())
 	cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1)
 	assert.Equal(1, len(cacheKeys))
@@ -57,7 +60,8 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 	defer controller.Finish()
 	localCache := freecache.NewCache(100)
 	localCache.Set([]byte("key"), []byte("value"), 100)
-	baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "")
+	sm := stats2.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))
+	baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "", sm)
 	// Returns true, as local cache contains over limit value for the key.
 	assert.Equal(true, baseRateLimit.IsOverLimitWithLocalCache("key"))
 }
@@ -66,11 +70,12 @@ func TestNoOverLimitWithLocalCache(t *testing.T) {
 	assert := assert.New(t)
 	controller := gomock.NewController(t)
 	defer controller.Finish()
-	baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "")
+	sm := stats2.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))
+	baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "", sm)
 	// Returns false, as local cache is nil.
 	assert.Equal(false, baseRateLimit.IsOverLimitWithLocalCache("domain_key_value_1234"))
 	localCache := freecache.NewCache(100)
-	baseRateLimitWithLocalCache := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "")
+	baseRateLimitWithLocalCache := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "", sm)
 	// Returns false, as local cache does not contain value for cache key.
 	assert.Equal(false, baseRateLimitWithLocalCache.IsOverLimitWithLocalCache("domain_key_value_1234"))
 }
@@ -79,8 +84,9 @@ func TestGetResponseStatusEmptyKey(t *testing.T) {
 	assert := assert.New(t)
 	controller := gomock.NewController(t)
 	defer controller.Finish()
-	baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "")
-	responseStatus := baseRateLimit.GetResponseDescriptorStatus("", nil, false, 1)
+	sm := stats2.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))
+	baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "", sm)
+	responseStatus := baseRateLimit.GetResponseDescriptorStatus("", nil, false, 1, "")
 	assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode())
 	assert.Equal(uint32(0), responseStatus.GetLimitRemaining())
 }
@@ -92,11 +98,12 @@ func TestGetResponseStatusOverLimitWithLocalCache(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	timeSource.EXPECT().UnixNow().Return(int64(1234))
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
-	baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "")
-	limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	sm := stats2.NewMockStatManager(statsStore)
+	baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm)
+	limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 	limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5)
 	// As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits.
-	responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2)
+	responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2, "")
 	assert.Equal(pb.RateLimitResponse_OVER_LIMIT, responseStatus.GetCode())
 	assert.Equal(uint32(0), responseStatus.GetLimitRemaining())
 	assert.Equal(limits[0].Limit, responseStatus.GetCurrentLimit())
@@ -112,10 +119,11 @@ func TestGetResponseStatusOverLimit(t *testing.T) {
 	timeSource.EXPECT().UnixNow().Return(int64(1234))
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
 	localCache := freecache.NewCache(100)
-	baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "")
-	limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	sm := stats2.NewMockStatManager(statsStore)
+	baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm)
+	limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 	limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5)
-	responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1)
+	responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1, "")
 	assert.Equal(pb.RateLimitResponse_OVER_LIMIT, responseStatus.GetCode())
 	assert.Equal(uint32(0), responseStatus.GetLimitRemaining())
 	assert.Equal(limits[0].Limit, responseStatus.GetCurrentLimit())
@@ -133,10 +141,11 @@ func TestGetResponseStatusBelowLimit(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	timeSource.EXPECT().UnixNow().Return(int64(1234))
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
-	baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "")
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	sm := stats2.NewMockStatManager(statsStore)
+	baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm)
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 	limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10)
-	responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1)
+	responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1, "")
 	assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode())
 	assert.Equal(uint32(4), responseStatus.GetLimitRemaining())
 	assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value())
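Two signature changes run through the limiter tests above: NewBaseRateLimit now takes the stats.Manager as a trailing parameter, and GetResponseDescriptorStatus gains a trailing key string, passed as "" everywhere here, which the detailed-metrics work later in this diff uses to attribute counters to a fully resolved descriptor key. A minimal sketch of the new call shape, using the same aliases as the test file above:

// Mock manager over a throwaway store; jitter and local cache disabled,
// 0.8 near-limit ratio, empty cache-key prefix, manager passed last.
sm := stats2.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))
baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "", sm)

// An empty descriptor key short-circuits to OK with zero remaining, as
// TestGetResponseStatusEmptyKey asserts; the final "" is the detailed key.
responseStatus := baseRateLimit.GetResponseDescriptorStatus("", nil, false, 1, "")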
diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go
index 1e2ba8d7..76fab06c 100644
--- a/test/memcached/cache_impl_test.go
+++ b/test/memcached/cache_impl_test.go
@@ -5,6 +5,7 @@ package memcached_test
 
 import (
+	stats2 "github.com/envoyproxy/ratelimit/test/mocks/stats"
 	"math/rand"
 	"strconv"
 	"testing"
@@ -34,7 +35,8 @@ func TestMemcached(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	client := mock_memcached.NewMockClient(controller)
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
-	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "")
+	sm := stats2.NewMockStatManager(statsStore)
+	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "")
 
 	timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
 	client.EXPECT().GetMulti([]string{"domain_key_value_1234"}).Return(
@@ -43,7 +45,7 @@ func TestMemcached(t *testing.T) {
 	client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil)
 
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -67,7 +69,7 @@ func TestMemcached(t *testing.T) {
 		}, 1)
 	limits = []*config.RateLimit{
 		nil,
-		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2_subkey2_subvalue2", statsStore)}
+		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
 			{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}},
@@ -97,8 +99,8 @@ func TestMemcached(t *testing.T) {
 			{{"key3", "value3"}, {"subkey3", "subvalue3"}},
 		}, 1)
 	limits = []*config.RateLimit{
-		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key3_value3", statsStore),
-		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, "key3_value3_subkey3_subvalue3", statsStore)}
+		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3")),
+		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{
 			{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)},
@@ -124,7 +126,8 @@ func TestMemcachedGetError(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	client := mock_memcached.NewMockClient(controller)
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
-	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "")
+	sm := stats2.NewMockStatManager(statsStore)
+	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "")
 
 	timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
 	client.EXPECT().GetMulti([]string{"domain_key_value_1234"}).Return(
@@ -133,7 +136,7 @@ func TestMemcachedGetError(t *testing.T) {
 	client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil)
 
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -151,7 +154,7 @@ func TestMemcachedGetError(t *testing.T) {
 	client.EXPECT().Increment("domain_key_value1_1234", uint64(1)).Return(uint64(5), nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value1"}}}, 1)
-	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value1", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value1"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -210,7 +213,8 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 	localCache := freecache.NewCache(100)
 	sink := &common.TestStatSink{}
 	statsStore := stats.NewStore(sink, true)
-	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, localCache, statsStore, 0.8, "")
+	sm := stats2.NewMockStatManager(statsStore)
+	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, localCache, sm, 0.8, "")
 	localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache"))
 
 	// Test Near Limit Stats. Under Near Limit Ratio
@@ -223,7 +227,7 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1)
 
 	limits := []*config.RateLimit{
-		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)}
+		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{
@@ -306,7 +310,8 @@ func TestNearLimit(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	client := mock_memcached.NewMockClient(controller)
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
-	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "")
+	sm := stats2.NewMockStatManager(statsStore)
+	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "")
 
 	// Test Near Limit Stats. Under Near Limit Ratio
 	timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3)
@@ -318,7 +323,7 @@ func TestNearLimit(t *testing.T) {
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1)
 
 	limits := []*config.RateLimit{
-		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)}
+		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{
@@ -371,7 +376,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().Increment("domain_key5_value5_1234", uint64(3)).Return(uint64(5), nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key5_value5", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -389,7 +394,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().Increment("domain_key6_value6_1234", uint64(2)).Return(uint64(7), nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2)
-	limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, "key6_value6", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -407,7 +412,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().Increment("domain_key7_value7_1234", uint64(3)).Return(uint64(19), nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key7_value7", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -425,7 +430,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().Increment("domain_key8_value8_1234", uint64(3)).Return(uint64(22), nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key8_value8", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -443,7 +448,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().Increment("domain_key9_value9_1234", uint64(7)).Return(uint64(22), nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key9_value9", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -461,7 +466,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().Increment("domain_key10_value10_1234", uint64(3)).Return(uint64(30), nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key10_value10", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -483,7 +488,8 @@ func TestMemcacheWithJitter(t *testing.T) {
 	client := mock_memcached.NewMockClient(controller)
 	jitterSource := mock_utils.NewMockJitterRandSource(controller)
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
-	cache := memcached.NewRateLimitCacheImpl(client, timeSource, rand.New(jitterSource), 3600, nil, statsStore, 0.8, "")
+	sm := stats2.NewMockStatManager(statsStore)
+	cache := memcached.NewRateLimitCacheImpl(client, timeSource, rand.New(jitterSource), 3600, nil, sm, 0.8, "")
 
 	timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
 	jitterSource.EXPECT().Int63().Return(int64(100))
@@ -504,7 +510,7 @@ func TestMemcacheWithJitter(t *testing.T) {
 	).Return(nil)
 
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -525,7 +531,8 @@ func TestMemcacheAdd(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	client := mock_memcached.NewMockClient(controller)
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
-	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "")
+	sm := stats2.NewMockStatManager(statsStore)
+	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "")
 
 	// Test a race condition with the initial add
 	timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
@@ -546,7 +553,7 @@ func TestMemcacheAdd(t *testing.T) {
 		uint64(2), nil)
 
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -570,7 +577,7 @@ func TestMemcacheAdd(t *testing.T) {
 	).Return(nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key2", "value2"}}}, 1)
-	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
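The memcached cache constructor now takes the stats.Manager where it previously took the raw gostats store; the store itself survives in these tests only to wire sinks and the local-cache gauges. The construction pattern repeated through the memcached tests above, condensed into one place (client and timeSource are the gomock mocks from the surrounding file):

statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := stats2.NewMockStatManager(statsStore)

// No jitter (nil source, 0 window), no local cache, 0.8 near-limit ratio,
// empty cache-key prefix; the manager replaces the old scope argument.
cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "")

// Limits are now built from manager-issued stats rather than (key, scope).
limits := []*config.RateLimit{
	config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value")),
}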
diff --git a/test/mocks/config/config.go b/test/mocks/config/config.go
index 38d5b347..5645fbb8 100644
--- a/test/mocks/config/config.go
+++ b/test/mocks/config/config.go
@@ -8,8 +8,8 @@ import (
 	context "context"
 	envoy_extensions_common_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
 	config "github.com/envoyproxy/ratelimit/src/config"
+	stats2 "github.com/envoyproxy/ratelimit/src/stats"
 	gomock "github.com/golang/mock/gomock"
-	stats "github.com/lyft/gostats"
 	reflect "reflect"
 )
@@ -88,7 +88,7 @@ func (m *MockRateLimitConfigLoader) EXPECT() *MockRateLimitConfigLoaderMockRecor
 }
 
 // Load mocks base method
-func (m *MockRateLimitConfigLoader) Load(arg0 []config.RateLimitConfigToLoad, arg1 stats.Scope) config.RateLimitConfig {
+func (m *MockRateLimitConfigLoader) Load(arg0 []config.RateLimitConfigToLoad, arg1 stats2.Manager) config.RateLimitConfig {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "Load", arg0, arg1)
 	ret0, _ := ret[0].(config.RateLimitConfig)
diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go
new file mode 100644
index 00000000..f13834df
--- /dev/null
+++ b/test/mocks/stats/manager.go
@@ -0,0 +1,81 @@
+package stats
+
+import (
+	stat "github.com/envoyproxy/ratelimit/src/stats"
+	stats "github.com/lyft/gostats"
+	logger "github.com/sirupsen/logrus"
+)
+
+type MockStatManager struct {
+	store stats.Store
+}
+
+func (m *MockStatManager) GetStatsStore() stats.Store {
+	return m.store
+}
+
+func (m *MockStatManager) NewShouldRateLimitStats() stat.ShouldRateLimitStats {
+	s := m.store.Scope("call.should_rate_limit")
+	ret := stat.ShouldRateLimitStats{}
+	ret.RedisError = s.NewCounter("redis_error")
+	ret.ServiceError = s.NewCounter("service_error")
+	return ret
+}
+
+func (m *MockStatManager) NewServiceStats() stat.ServiceStats {
+	ret := stat.ServiceStats{}
+	ret.ConfigLoadSuccess = m.store.NewCounter("config_load_success")
+	ret.ConfigLoadError = m.store.NewCounter("config_load_error")
+	ret.ShouldRateLimit = m.NewShouldRateLimitStats()
+	return ret
+}
+
+func (m *MockStatManager) NewShouldRateLimitLegacyStats() stat.ShouldRateLimitLegacyStats {
+	s := m.store.Scope("call.should_rate_limit_legacy")
+	return stat.ShouldRateLimitLegacyStats{
+		ReqConversionError:   s.NewCounter("req_conversion_error"),
+		RespConversionError:  s.NewCounter("resp_conversion_error"),
+		ShouldRateLimitError: s.NewCounter("should_rate_limit_error"),
+	}
+}
+
+// TODO: review mock implementation
+func (m *MockStatManager) NewStats(key string) stat.RateLimitStats {
+	ret := stat.RateLimitStats{}
+	logger.Debugf("outputting test stats %s", key)
+	ret.Key = key
+	ret.TotalHits = m.store.NewCounter(key + ".total_hits")
+	ret.OverLimit = m.store.NewCounter(key + ".over_limit")
+	ret.NearLimit = m.store.NewCounter(key + ".near_limit")
+	ret.OverLimitWithLocalCache = m.store.NewCounter(key + ".over_limit_with_local_cache")
+	ret.WithinLimit = m.store.NewCounter(key + ".within_limit")
+	return ret
+}
+
+func (m *MockStatManager) AddTotalHits(u uint64, rlStats stat.RateLimitStats, key string) {
+	rlStats.TotalHits.Add(u)
+}
+
+func (m *MockStatManager) AddOverLimit(u uint64, rlStats stat.RateLimitStats, key string) {
+	rlStats.OverLimit.Add(u)
+}
+
+func (m *MockStatManager) AddNearLimit(u uint64, rlStats stat.RateLimitStats, key string) {
+	rlStats.NearLimit.Add(u)
+}
+
+func (m *MockStatManager) AddOverLimitWithLocalCache(u uint64, rlStats stat.RateLimitStats, key string) {
+	rlStats.OverLimitWithLocalCache.Add(u)
+}
+
+func (m *MockStatManager) AddWithinLimit(u uint64, rlStats stat.RateLimitStats, key string) {
+	rlStats.WithinLimit.Add(u)
+}
+
+func (m *MockStatManager) NewDetailedStats(key string) stat.RateLimitStats {
+	return m.NewStats(key)
+}
+
+func NewMockStatManager(store stats.Store) stat.Manager {
+	return &MockStatManager{store: store}
+}
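MockStatManager mirrors the production manager's counter naming (<key>.total_hits, .over_limit, .near_limit, .over_limit_with_local_cache, .within_limit) but ignores the detailed key in every Add* method and routes NewDetailedStats straight back to NewStats, so detailed keys share the plain counters. A standalone usage sketch, assuming only the packages this diff introduces:

package main

import (
	"fmt"

	gostats "github.com/lyft/gostats"

	mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats"
)

func main() {
	store := gostats.NewStore(gostats.NewNullSink(), false)
	sm := mockstats.NewMockStatManager(store)

	rl := sm.NewStats("test-domain.key_value")
	sm.AddTotalHits(3, rl, "some-detailed-key") // the mock ignores the detailed key

	// Reading the counter back through the store sees the same value, which is
	// exactly what the config and cache tests above rely on.
	fmt.Println(rl.TotalHits.Value())                                         // 3
	fmt.Println(store.NewCounter("test-domain.key_value.total_hits").Value()) // 3
}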
diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go
index 6c190ea7..37bca184 100644
--- a/test/redis/bench_test.go
+++ b/test/redis/bench_test.go
@@ -2,6 +2,7 @@ package redis_test
 
 import (
 	"context"
+	"github.com/envoyproxy/ratelimit/test/mocks/stats"
 	"runtime"
 	"testing"
 	"time"
@@ -10,7 +11,7 @@ import (
 	"github.com/envoyproxy/ratelimit/src/config"
 	"github.com/envoyproxy/ratelimit/src/redis"
 	"github.com/envoyproxy/ratelimit/src/utils"
-	stats "github.com/lyft/gostats"
+	gostats "github.com/lyft/gostats"
 
 	"math/rand"
@@ -40,13 +41,14 @@ func BenchmarkParallelDoLimit(b *testing.B) {
 	mkDoLimitBench := func(pipelineWindow time.Duration, pipelineLimit int) func(*testing.B) {
 		return func(b *testing.B) {
-			statsStore := stats.NewStore(stats.NewNullSink(), false)
+			statsStore := gostats.NewStore(gostats.NewNullSink(), false)
+			sm := stats.NewMockStatManager(statsStore)
 			client := redis.NewClientImpl(statsStore, false, "", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit)
 			defer client.Close()
-			cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "")
+			cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm)
 
 			request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-			limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+			limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 
 			// wait for the pool to fill up
 			for {
diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go
index 65883f4b..e0723333 100644
--- a/test/redis/fixed_cache_impl_test.go
+++ b/test/redis/fixed_cache_impl_test.go
@@ -1,6 +1,7 @@
 package redis_test
 
 import (
+	"github.com/envoyproxy/ratelimit/test/mocks/stats"
 	"testing"
 
 	"github.com/coocood/freecache"
@@ -11,7 +12,7 @@ import (
 	"github.com/envoyproxy/ratelimit/src/limiter"
 	"github.com/envoyproxy/ratelimit/src/redis"
 	"github.com/envoyproxy/ratelimit/src/utils"
-	stats "github.com/lyft/gostats"
+	gostats "github.com/lyft/gostats"
 
 	"math/rand"
@@ -36,17 +37,18 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) {
 		assert := assert.New(t)
 		controller := gomock.NewController(t)
 		defer controller.Finish()
+		statsStore := gostats.NewStore(gostats.NewNullSink(), false)
+		sm := stats.NewMockStatManager(statsStore)
 
 		client := mock_redis.NewMockClient(controller)
 		perSecondClient := mock_redis.NewMockClient(controller)
 		timeSource := mock_utils.NewMockTimeSource(controller)
 		var cache limiter.RateLimitCache
 		if usePerSecondRedis {
-			cache = redis.NewFixedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "")
+			cache = redis.NewFixedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm)
 		} else {
-			cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "")
+			cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm)
 		}
-		statsStore := stats.NewStore(stats.NewNullSink(), false)
 		timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
 
 		var clientUsed *mock_redis.MockClient
@@ -61,7 +63,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) {
 		clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 		request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-		limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+		limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 
 		assert.Equal(
 			[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -86,7 +88,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) {
 			}, 1)
 		limits = []*config.RateLimit{
 			nil,
-			config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2_subkey2_subvalue2", statsStore)}
+			config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"))}
 
 		assert.Equal(
 			[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
 				{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}},
@@ -113,8 +115,8 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) {
 			{{"key3", "value3"}, {"subkey3", "subvalue3"}},
 		}, 1)
 		limits = []*config.RateLimit{
-			config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key3_value3", statsStore),
-			config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, "key3_value3_subkey3_subvalue3", statsStore)}
+			config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3")),
+			config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"))}
 
 		assert.Equal(
 			[]*pb.RateLimitResponse_DescriptorStatus{
 				{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)},
@@ -131,7 +133,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) {
 	}
 }
 
-func testLocalCacheStats(localCacheStats stats.StatGenerator, statsStore stats.Store, sink *common.TestStatSink,
+func testLocalCacheStats(localCacheStats gostats.StatGenerator, statsStore gostats.Store, sink *common.TestStatSink,
 	expectedHitCount int, expectedMissCount int, expectedLookUpCount int, expectedExpiredCount int,
 	expectedEntryCount int) func(*testing.T) {
 	return func(t *testing.T) {
@@ -175,9 +177,10 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 	client := mock_redis.NewMockClient(controller)
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	localCache := freecache.NewCache(100)
-	cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "")
+	statsStore := gostats.NewStore(gostats.NewNullSink(), false)
+	sm := stats.NewMockStatManager(statsStore)
+	cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "", sm)
 	sink := &common.TestStatSink{}
-	statsStore := stats.NewStore(sink, true)
 	localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache"))
 
 	// Test Near Limit Stats. Under Near Limit Ratio
@@ -190,7 +193,7 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1)
 
 	limits := []*config.RateLimit{
-		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)}
+		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{
@@ -271,8 +274,9 @@ func TestNearLimit(t *testing.T) {
 
 	client := mock_redis.NewMockClient(controller)
 	timeSource := mock_utils.NewMockTimeSource(controller)
-	cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "")
-	statsStore := stats.NewStore(stats.NewNullSink(), false)
+	statsStore := gostats.NewStore(gostats.NewNullSink(), false)
+	sm := stats.NewMockStatManager(statsStore)
+	cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm)
 
 	// Test Near Limit Stats. Under Near Limit Ratio
 	timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3)
@@ -284,7 +288,7 @@ func TestNearLimit(t *testing.T) {
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1)
 
 	limits := []*config.RateLimit{
-		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)}
+		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{
@@ -336,7 +340,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key5_value5", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -353,7 +357,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2)
-	limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, "key6_value6", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -370,7 +374,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key7_value7", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -387,7 +391,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key8_value8", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -404,7 +408,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key9_value9", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -421,7 +425,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key10_value10", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -440,8 +444,9 @@ func TestRedisWithJitter(t *testing.T) {
 	client := mock_redis.NewMockClient(controller)
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	jitterSource := mock_utils.NewMockJitterRandSource(controller)
-	cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, "")
-	statsStore := stats.NewStore(stats.NewNullSink(), false)
+	statsStore := gostats.NewStore(gostats.NewNullSink(), false)
+	sm := stats.NewMockStatManager(statsStore)
+	cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm)
 
 	timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
 	jitterSource.EXPECT().Int63().Return(int64(100))
@@ -450,7 +455,7 @@ func TestRedisWithJitter(t *testing.T) {
 	client.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
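In the redis tests a single stats.Manager serves both the primary and the optional per-second client path; the raw gostats store (aliased gostats after this diff) is still needed directly for redis pool stats and the local-cache gauges. Condensed from testRedis above, with the file's own aliases ("stats" now names test/mocks/stats):

statsStore := gostats.NewStore(gostats.NewNullSink(), false)
sm := stats.NewMockStatManager(statsStore)

var cache limiter.RateLimitCache
if usePerSecondRedis {
	// Separate client for per-second limits; both paths share one manager.
	cache = redis.NewFixedRateLimitCacheImpl(client, perSecondClient, timeSource,
		rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm)
} else {
	cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource,
		rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm)
}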
diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go
index a51ddbe9..a0d7bef6 100644
--- a/test/service/ratelimit_legacy_test.go
+++ b/test/service/ratelimit_legacy_test.go
@@ -1,6 +1,7 @@
 package ratelimit_test
 
 import (
+	"github.com/envoyproxy/ratelimit/src/stats"
 	"testing"
 
 	core_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
@@ -14,7 +15,6 @@ import (
 	"github.com/envoyproxy/ratelimit/src/service"
 	"github.com/envoyproxy/ratelimit/test/common"
 	"github.com/golang/mock/gomock"
-	"github.com/lyft/gostats"
 	"github.com/stretchr/testify/assert"
 	"golang.org/x/net/context"
 )
@@ -80,7 +80,7 @@ func TestServiceLegacy(test *testing.T) {
 	barrier := newBarrier()
 	t.configLoader.EXPECT().Load(
 		[]config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do(
-		func([]config.RateLimitConfigToLoad, stats.Scope) { barrier.signal() }).Return(t.config)
+		func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config)
 	t.runtimeUpdateCallback <- 1
 	barrier.wait()
@@ -93,7 +93,7 @@ func TestServiceLegacy(test *testing.T) {
 	}
 
 	limits := []*config.RateLimit{
-		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore),
+		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.sm.NewStats("key")),
 		nil}
 	legacyLimits, err := convertRatelimits(limits)
 	if err != nil {
@@ -120,7 +120,7 @@ func TestServiceLegacy(test *testing.T) {
 	// Config load failure.
 	t.configLoader.EXPECT().Load(
 		[]config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do(
-		func([]config.RateLimitConfigToLoad, stats.Scope) {
+		func([]config.RateLimitConfigToLoad, stats.Manager) {
 			barrier.signal()
 			panic(config.RateLimitConfigError("load error"))
 		})
@@ -130,7 +130,7 @@ func TestServiceLegacy(test *testing.T) {
 	// Config should still be valid. Also make sure order does not affect results.
 	limits = []*config.RateLimit{
 		nil,
-		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore)}
+		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.sm.NewStats("key"))}
 	legacyLimits, err = convertRatelimits(limits)
 	if err != nil {
 		t.assert.FailNow(err.Error())
@@ -153,8 +153,8 @@ func TestServiceLegacy(test *testing.T) {
 		response)
 	t.assert.Nil(err)
 
-	t.assert.EqualValues(2, t.statStore.NewCounter("config_load_success").Value())
-	t.assert.EqualValues(1, t.statStore.NewCounter("config_load_error").Value())
+	t.assert.EqualValues(2, t.store.NewCounter("config_load_success").Value())
+	t.assert.EqualValues(1, t.store.NewCounter("config_load_error").Value())
 }
 
 func TestEmptyDomainLegacy(test *testing.T) {
@@ -166,8 +166,8 @@ func TestEmptyDomainLegacy(test *testing.T) {
 	response, err := service.GetLegacyService().ShouldRateLimit(nil, request)
 	t.assert.Nil(response)
 	t.assert.Equal("rate limit domain must not be empty", err.Error())
-	t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value())
-	t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value())
+	t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.service_error").Value())
+	t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value())
 }
 
 func TestEmptyDescriptorsLegacy(test *testing.T) {
@@ -179,8 +179,8 @@ func TestEmptyDescriptorsLegacy(test *testing.T) {
 	response, err := service.GetLegacyService().ShouldRateLimit(nil, request)
 	t.assert.Nil(response)
 	t.assert.Equal("rate limit descriptor list must not be empty", err.Error())
-	t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value())
-	t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value())
+	t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.service_error").Value())
+	t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value())
 }
 
 func TestCacheErrorLegacy(test *testing.T) {
@@ -193,7 +193,7 @@ func TestCacheErrorLegacy(test *testing.T) {
 	if err != nil {
 		t.assert.FailNow(err.Error())
 	}
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.sm.NewStats("key"))}
 	t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[0]).Return(limits[0])
 	t.cache.EXPECT().DoLimit(nil, req, limits).Do(
 		func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) {
@@ -203,8 +203,8 @@ func TestCacheErrorLegacy(test *testing.T) {
 	response, err := service.GetLegacyService().ShouldRateLimit(nil, legacyRequest)
 	t.assert.Nil(response)
 	t.assert.Equal("cache error", err.Error())
-	t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.redis_error").Value())
-	t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value())
+	t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.redis_error").Value())
+	t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value())
 }
 
 func TestInitialLoadErrorLegacy(test *testing.T) {
@@ -218,17 +218,17 @@ func TestInitialLoadErrorLegacy(test *testing.T) {
 	t.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1)
 	t.configLoader.EXPECT().Load(
 		[]config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do(
-		func([]config.RateLimitConfigToLoad, stats.Scope) {
+		func([]config.RateLimitConfigToLoad, stats.Manager) {
 			panic(config.RateLimitConfigError("load error"))
 		})
-	service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore, true)
+	service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.sm, true)
 
 	request := common.NewRateLimitRequestLegacy("test-domain", [][][2]string{{{"hello", "world"}}}, 1)
 	response, err := service.GetLegacyService().ShouldRateLimit(nil, request)
 	t.assert.Nil(response)
 	t.assert.Equal("no rate limit configuration loaded", err.Error())
-	t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value())
-	t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value())
+	t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.service_error").Value())
+	t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value())
 }
diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go
index 12c77926..49def355 100644
--- a/test/service/ratelimit_test.go
+++ b/test/service/ratelimit_test.go
@@ -1,6 +1,7 @@
 package ratelimit_test
 
 import (
+	"github.com/envoyproxy/ratelimit/src/stats"
 	"sync"
 	"testing"
 
@@ -13,8 +14,9 @@ import (
 	mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter"
 	mock_loader "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader"
 	mock_snapshot "github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot"
+	stats3 "github.com/envoyproxy/ratelimit/test/mocks/stats"
 	"github.com/golang/mock/gomock"
-	stats "github.com/lyft/gostats"
+	gostats "github.com/lyft/gostats"
 	"github.com/stretchr/testify/assert"
 	"golang.org/x/net/context"
 )
@@ -55,7 +57,8 @@ type rateLimitServiceTestSuite struct {
 	configLoader          *mock_config.MockRateLimitConfigLoader
 	config                *mock_config.MockRateLimitConfig
 	runtimeUpdateCallback chan<- int
-	statStore             stats.Store
+	sm                    stats.Manager
+	store                 gostats.Store
 }
 
 func commonSetup(t *testing.T) rateLimitServiceTestSuite {
@@ -67,7 +70,8 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite {
 	ret.cache = mock_limiter.NewMockRateLimitCache(ret.controller)
 	ret.configLoader = mock_config.NewMockRateLimitConfigLoader(ret.controller)
 	ret.config = mock_config.NewMockRateLimitConfig(ret.controller)
-	ret.statStore = stats.NewStore(stats.NewNullSink(), false)
+	ret.store = gostats.NewStore(gostats.NewNullSink(), false)
+	ret.sm = stats3.NewMockStatManager(ret.store)
 	return ret
 }
@@ -82,7 +86,7 @@ func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitSe
 	this.configLoader.EXPECT().Load(
 		[]config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}},
 		gomock.Any()).Return(this.config)
-	return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statStore, true)
+	return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.sm, true)
 }
 
 func TestService(test *testing.T) {
@@ -109,7 +113,7 @@ func TestService(test *testing.T) {
 	barrier := newBarrier()
 	t.configLoader.EXPECT().Load(
 		[]config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do(
-		func([]config.RateLimitConfigToLoad, stats.Scope) { barrier.signal() }).Return(t.config)
+		func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config)
 	t.runtimeUpdateCallback <- 1
 	barrier.wait()
@@ -117,7 +121,7 @@ func TestService(test *testing.T) {
 	request = common.NewRateLimitRequest(
 		"different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1)
 	limits := []*config.RateLimit{
-		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore),
+		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.sm.NewStats("key")),
 		nil}
 	t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0])
 	t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1])
@@ -139,7 +143,7 @@ func TestService(test *testing.T) {
 	// Config load failure.
 	t.configLoader.EXPECT().Load(
 		[]config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do(
-		func([]config.RateLimitConfigToLoad, stats.Scope) {
+		func([]config.RateLimitConfigToLoad, stats.Manager) {
 			barrier.signal()
 			panic(config.RateLimitConfigError("load error"))
 		})
@@ -149,7 +153,7 @@ func TestService(test *testing.T) {
 	// Config should still be valid. Also make sure order does not affect results.
 	limits = []*config.RateLimit{
 		nil,
-		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore)}
+		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.sm.NewStats("key"))}
 	t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0])
 	t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1])
 	t.cache.EXPECT().DoLimit(nil, request, limits).Return(
@@ -167,8 +171,8 @@ func TestService(test *testing.T) {
 		response)
 	t.assert.Nil(err)
 
-	t.assert.EqualValues(2, t.statStore.NewCounter("config_load_success").Value())
-	t.assert.EqualValues(1, t.statStore.NewCounter("config_load_error").Value())
+	t.assert.EqualValues(2, t.store.NewCounter("config_load_success").Value())
+	t.assert.EqualValues(1, t.store.NewCounter("config_load_error").Value())
 }
 
 func TestEmptyDomain(test *testing.T) {
@@ -180,7 +184,7 @@ func TestEmptyDomain(test *testing.T) {
 	response, err := service.ShouldRateLimit(nil, request)
 	t.assert.Nil(response)
 	t.assert.Equal("rate limit domain must not be empty", err.Error())
-	t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value())
+	t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.service_error").Value())
 }
 
 func TestEmptyDescriptors(test *testing.T) {
@@ -192,7 +196,7 @@ func TestEmptyDescriptors(test *testing.T) {
 	response, err := service.ShouldRateLimit(nil, request)
 	t.assert.Nil(response)
 	t.assert.Equal("rate limit descriptor list must not be empty", err.Error())
-	t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value())
+	t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.service_error").Value())
 }
 
 func TestCacheError(test *testing.T) {
@@ -201,7 +205,7 @@ func TestCacheError(test *testing.T) {
 	service := t.setupBasicService()
 
 	request := common.NewRateLimitRequest("different-domain", [][][2]string{{{"foo", "bar"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.sm.NewStats("key"))}
 	t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0])
 	t.cache.EXPECT().DoLimit(nil, request, limits).Do(
 		func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) {
@@ -211,7 +215,7 @@ func TestCacheError(test *testing.T) {
 	response, err := service.ShouldRateLimit(nil, request)
 	t.assert.Nil(response)
 	t.assert.Equal("cache error", err.Error())
-	t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.redis_error").Value())
+	t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.redis_error").Value())
 }
 
 func TestInitialLoadError(test *testing.T) {
@@ -225,14 +229,14 @@ func TestInitialLoadError(test *testing.T) {
 	t.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1)
 	t.configLoader.EXPECT().Load(
 		[]config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do(
-		func([]config.RateLimitConfigToLoad, stats.Scope) {
+		func([]config.RateLimitConfigToLoad, stats.Manager) {
 			panic(config.RateLimitConfigError("load error"))
 		})
-	service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore, true)
+	service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.sm, true)
 
 	request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1)
 	response, err := service.ShouldRateLimit(nil, request)
 	t.assert.Nil(response)
 	t.assert.Equal("no rate limit configuration loaded", err.Error())
-	t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value())
+	t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.service_error").Value())
 }
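The service suites now track two handles: sm, the manager handed to ratelimit.NewService, and store, the underlying gostats store the assertions read from. Because the mock manager creates config_load_success, config_load_error and the call.should_rate_limit.* counters directly on that store, the assertions keep their old counter names. A condensed view of that coupling (a sketch, not the suite's exact wiring; runtime, cache and configLoader stand in for the gomock fixtures above):

store := gostats.NewStore(gostats.NewNullSink(), false)
sm := stats3.NewMockStatManager(store)
service := ratelimit.NewService(runtime, cache, configLoader, sm, true)

// Service-level counters land on the shared store, so a test can observe
// what the service incremented without going back through the manager.
loads := store.NewCounter("config_load_success").Value()
_ = service
_ = loads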
diff --git a/test/stats/detailedmetrics_test.go b/test/stats/detailedmetrics_test.go
new file mode 100644
index 00000000..bc7ef30a
--- /dev/null
+++ b/test/stats/detailedmetrics_test.go
@@ -0,0 +1,135 @@
+package stats
+
+import (
+	"github.com/envoyproxy/ratelimit/src/config"
+	ratelimit "github.com/envoyproxy/ratelimit/src/service"
+	settings "github.com/envoyproxy/ratelimit/src/settings"
+	"github.com/envoyproxy/ratelimit/src/stats"
+	mock_config "github.com/envoyproxy/ratelimit/test/mocks/config"
+	mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter"
+	mock_loader "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader"
+	mock_snapshot "github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot"
+	"github.com/golang/mock/gomock"
+	gostats "github.com/lyft/gostats"
+	"github.com/stretchr/testify/assert"
+	"testing"
+)
+
+func commonSetup(t *testing.T, detailedMetrics bool) rateLimitServiceTestSuite {
+	ret := rateLimitServiceTestSuite{}
+	ret.assert = assert.New(t)
+	ret.controller = gomock.NewController(t)
+	ret.runtime = mock_loader.NewMockIFace(ret.controller)
+	ret.snapshot = mock_snapshot.NewMockIFace(ret.controller)
+	ret.cache = mock_limiter.NewMockRateLimitCache(ret.controller)
+	ret.configLoader = mock_config.NewMockRateLimitConfigLoader(ret.controller)
+	ret.config = mock_config.NewMockRateLimitConfig(ret.controller)
+	ret.store = gostats.NewStore(gostats.NewNullSink(), false)
+	sett := settings.NewSettings()
+	sett.DetailedMetrics = detailedMetrics
+	ret.sm = stats.NewStatManager(ret.store, sett)
+	return ret
+}
+
+type rateLimitServiceTestSuite struct {
+	assert                *assert.Assertions
+	controller            *gomock.Controller
+	runtime               *mock_loader.MockIFace
+	snapshot              *mock_snapshot.MockIFace
+	cache                 *mock_limiter.MockRateLimitCache
+	configLoader          *mock_config.MockRateLimitConfigLoader
+	config                *mock_config.MockRateLimitConfig
+	runtimeUpdateCallback chan<- int
+	sm                    stats.Manager
+	store                 gostats.Store
+}
+
+func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitServiceServer {
+	this.runtime.EXPECT().AddUpdateCallback(gomock.Any()).Do(
+		func(callback chan<- int) {
+			this.runtimeUpdateCallback = callback
+		})
+	this.runtime.EXPECT().Snapshot().Return(this.snapshot).MinTimes(1)
+	this.snapshot.EXPECT().Keys().Return([]string{"foo", "config.basic_config"}).MinTimes(1)
+	this.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1)
+	this.configLoader.EXPECT().Load(
+		[]config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}},
+		gomock.Any()).Return(this.config)
+	return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.sm, true)
+}
+
+func TestDetailedMetricsTotalHits(test *testing.T) {
+	t := commonSetup(test, true)
+	defer t.controller.Finish()
+
+	key := "hello_world"
+	detailedKey1 := "hello_world_detailed1"
+	detailedKey2 := "hello_world_detailed2"
+	rlStats := t.sm.NewStats(key)
+	t.sm.AddTotalHits(11, rlStats, detailedKey1)
+	t.sm.AddTotalHits(22, rlStats, detailedKey2)
+
+	assert.Equal(test, uint64(33), t.sm.NewStats(key).TotalHits.Value())
+	assert.Equal(test, uint64(11), t.sm.NewDetailedStats(detailedKey1).TotalHits.Value())
+	assert.Equal(test, uint64(22), t.sm.NewDetailedStats(detailedKey2).TotalHits.Value())
+}
+
+func TestDetailedMetricsNearLimit(test *testing.T) {
+	t := commonSetup(test, true)
+	defer t.controller.Finish()
+
+	key := "hello_world"
+	detailedKey1 := "hello_world_detailed1"
+	detailedKey2 := "hello_world_detailed2"
+	rlStats := t.sm.NewStats(key)
+	t.sm.AddNearLimit(11, rlStats, detailedKey1)
+	t.sm.AddNearLimit(22, rlStats, detailedKey2)
+
+	assert.Equal(test, uint64(33), t.sm.NewStats(key).NearLimit.Value())
+	assert.Equal(test, uint64(11), t.sm.NewDetailedStats(detailedKey1).NearLimit.Value())
+	assert.Equal(test, uint64(22), t.sm.NewDetailedStats(detailedKey2).NearLimit.Value())
+}
+
+func TestDetailedMetricsOverLimit(test *testing.T) {
+	t := commonSetup(test, true)
+	defer t.controller.Finish()
+
+	key := "hello_world"
+	detailedKey1 := "hello_world_detailed1"
+	detailedKey2 := "hello_world_detailed2"
+	rlStats := t.sm.NewStats(key)
+	t.sm.AddOverLimit(11, rlStats, detailedKey1)
+	t.sm.AddOverLimit(22, rlStats, detailedKey2)
+
+	assert.Equal(test, uint64(33), t.sm.NewStats(key).OverLimit.Value())
+	assert.Equal(test, uint64(11), t.sm.NewDetailedStats(detailedKey1).OverLimit.Value())
+	assert.Equal(test, uint64(22), t.sm.NewDetailedStats(detailedKey2).OverLimit.Value())
+}
+
+func TestDetailedMetricsOverLimitWithLocalCache(test *testing.T) {
+	t := commonSetup(test, true)
+	defer t.controller.Finish()
+
+	key := "hello_world"
+	detailedKey1 := "hello_world_detailed1"
+	detailedKey2 := "hello_world_detailed2"
+	rlStats := t.sm.NewStats(key)
+	t.sm.AddOverLimitWithLocalCache(11, rlStats, detailedKey1)
+	t.sm.AddOverLimitWithLocalCache(22, rlStats, detailedKey2)
+
+	assert.Equal(test, uint64(33), t.sm.NewStats(key).OverLimitWithLocalCache.Value())
+	assert.Equal(test, uint64(11), t.sm.NewDetailedStats(detailedKey1).OverLimitWithLocalCache.Value())
+	assert.Equal(test, uint64(22), t.sm.NewDetailedStats(detailedKey2).OverLimitWithLocalCache.Value())
+}
+
+func TestDetailedMetricsTurnedOff(test *testing.T) {
+	t := commonSetup(test, false)
+	defer t.controller.Finish()
+
+	key := "hello_world"
+	detailedKey1 := "hello_world_detailed1"
+	detailedKey2 := "hello_world_detailed2"
+	rlStats := t.sm.NewStats(key)
+	t.sm.AddOverLimitWithLocalCache(11, rlStats, detailedKey1)
+	t.sm.AddOverLimitWithLocalCache(22, rlStats, detailedKey2)
+
+	assert.Equal(test, uint64(33), t.sm.NewStats(key).OverLimitWithLocalCache.Value())
+	assert.Equal(test, uint64(0), t.sm.NewDetailedStats(detailedKey1).OverLimitWithLocalCache.Value())
+	assert.Equal(test, uint64(0), t.sm.NewDetailedStats(detailedKey2).OverLimitWithLocalCache.Value())
+}
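These tests pin down the detailed-metrics contract: with DetailedMetrics enabled, the production stats.NewStatManager keeps the aggregate counters on the plain key and additionally books each Add* call against the caller-supplied detailed key; with the flag off, the detailed counters stay at zero while the aggregates still accumulate (33 = 11 + 22 above). A minimal sketch of that invariant using the manager from src/stats:

sett := settings.NewSettings()
sett.DetailedMetrics = true
sm := stats.NewStatManager(gostats.NewStore(gostats.NewNullSink(), false), sett)

agg := sm.NewStats("hello_world")
sm.AddTotalHits(11, agg, "hello_world_detailed1")
sm.AddTotalHits(22, agg, "hello_world_detailed2")

// Aggregate sees the sum; each detailed key sees only its own share:
//   agg.TotalHits.Value() == 33
//   sm.NewDetailedStats("hello_world_detailed1").TotalHits.Value() == 11
// With sett.DetailedMetrics = false, both detailed values would read 0.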