From 4f51b55542ec720205dc34021bd714c8db885233 Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Fri, 12 Feb 2021 16:17:31 -0300 Subject: [PATCH 01/26] Stat manager --- src/config/config.go | 14 ++--- src/config/config_impl.go | 60 +++++---------------- src/config_check_cmd/main.go | 4 +- src/limiter/base_limiter.go | 16 +++--- src/stats/manager.go | 13 +++++ src/stats/manager_impl.go | 102 +++++++++++++++++++++++++++++++++++ 6 files changed, 143 insertions(+), 66 deletions(-) create mode 100644 src/stats/manager.go create mode 100644 src/stats/manager_impl.go diff --git a/src/config/config.go b/src/config/config.go index 43ffbfa1..d7f05132 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -3,7 +3,7 @@ package config import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - stats "github.com/lyft/gostats" + stat "github.com/envoyproxy/ratelimit/src/stats" "golang.org/x/net/context" ) @@ -14,18 +14,10 @@ func (e RateLimitConfigError) Error() string { return string(e) } -// Stats for an individual rate limit config entry. -type RateLimitStats struct { - TotalHits stats.Counter - OverLimit stats.Counter - NearLimit stats.Counter - OverLimitWithLocalCache stats.Counter -} - // Wrapper for an individual rate limit config entry which includes the defined limit and stats. type RateLimit struct { FullKey string - Stats RateLimitStats + Stats stat.RateLimitStats Limit *pb.RateLimitResponse_RateLimit } @@ -55,5 +47,5 @@ type RateLimitConfigLoader interface { // @param statsScope supplies the stats scope to use for limit stats during runtime. // @return a new configuration. // @throws RateLimitConfigError if the configuration could not be created. - Load(configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig + Load(configs []RateLimitConfigToLoad, manager stat.Manager) RateLimitConfig } diff --git a/src/config/config_impl.go b/src/config/config_impl.go index e19b5ce0..aae6e92e 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -6,10 +6,10 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - stats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" "gopkg.in/yaml.v2" + stat "github.com/envoyproxy/ratelimit/src/stats" ) type yamlRateLimit struct { @@ -40,7 +40,7 @@ type rateLimitDomain struct { type rateLimitConfigImpl struct { domains map[string]*rateLimitDomain - statsScope stats.Scope + manager stat.Manager } var validKeys = map[string]bool{ @@ -53,19 +53,6 @@ var validKeys = map[string]bool{ "requests_per_unit": true, } -// Create new rate limit stats for a config entry. -// @param statsScope supplies the owning scope. -// @param key supplies the fully resolved key name of the entry. -// @return new stats. -func newRateLimitStats(statsScope stats.Scope, key string) RateLimitStats { - ret := RateLimitStats{} - ret.TotalHits = statsScope.NewCounter(key + ".total_hits") - ret.OverLimit = statsScope.NewCounter(key + ".over_limit") - ret.NearLimit = statsScope.NewCounter(key + ".near_limit") - ret.OverLimitWithLocalCache = statsScope.NewCounter(key + ".over_limit_with_local_cache") - return ret -} - // Create a new rate limit config entry. // @param requestsPerUnit supplies the requests per unit of time for the entry. // @param unit supplies the unit of time for the entry. 
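The removed newRateLimitStats helper is exactly what the new stat.Manager absorbs: per-key counters are now minted by the manager instead of directly on a gostats scope. A minimal sketch of the replacement call pattern, using only identifiers this patch introduces (the key value is illustrative); note that ManagerImpl.NewStats, as added in this patch, names its counters with a detailed_ prefix, unlike the plain ".total_hits" style names the removed helper produced:

	rlStats := manager.NewStats("test-domain.key1_value1") // illustrative fully resolved key
	rlStats.TotalHits.Inc()                                // each field is a gostats.Counter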
@@ -73,9 +60,9 @@ func newRateLimitStats(statsScope stats.Scope, key string) RateLimitStats { // @param scope supplies the owning scope. // @return the new config entry. func NewRateLimit( - requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, key string, scope stats.Scope) *RateLimit { + requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats RateLimitStats) *RateLimit { - return &RateLimit{FullKey: key, Stats: newRateLimitStats(scope, key), Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}} + return &RateLimit{FullKey: rlStats.String(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}} } // Dump an individual descriptor for debugging purposes. @@ -104,9 +91,7 @@ func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimit // @param parentKey supplies the fully resolved key name that owns this config level. // @param descriptors supplies the YAML descriptors to load. // @param statsScope supplies the owning scope. -func (this *rateLimitDescriptor) loadDescriptors( - config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor, - statsScope stats.Scope) { +func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor, manager stat.Manager) { for _, descriptorConfig := range descriptors { if descriptorConfig.Key == "" { @@ -137,8 +122,7 @@ func (this *rateLimitDescriptor) loadDescriptors( } rateLimit = NewRateLimit( - descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), newParentKey, - statsScope) + descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), manager.NewStats(newParentKey)) rateLimitDebugString = fmt.Sprintf( " ratelimit={requests_per_unit=%d, unit=%s}", rateLimit.Limit.RequestsPerUnit, rateLimit.Limit.Unit.String()) @@ -147,8 +131,7 @@ func (this *rateLimitDescriptor) loadDescriptors( logger.Debugf( "loading descriptor: key=%s%s", newParentKey, rateLimitDebugString) newDescriptor := &rateLimitDescriptor{map[string]*rateLimitDescriptor{}, rateLimit} - newDescriptor.loadDescriptors( - config, newParentKey+".", descriptorConfig.Descriptors, statsScope) + newDescriptor.loadDescriptors(config, newParentKey+".", descriptorConfig.Descriptors, manager) this.descriptors[finalKey] = newDescriptor } } @@ -228,24 +211,10 @@ func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad) { logger.Debugf("loading domain: %s", root.Domain) newDomain := &rateLimitDomain{rateLimitDescriptor{map[string]*rateLimitDescriptor{}, nil}} - newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.statsScope) + newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.manager) this.domains[root.Domain] = newDomain } -func (this *rateLimitConfigImpl) descriptorToKey(descriptor *pb_struct.RateLimitDescriptor) string { - rateLimitKey := "" - for _, entry := range descriptor.Entries { - if rateLimitKey != "" { - rateLimitKey += "." - } - rateLimitKey += entry.Key - if entry.Value != "" { - rateLimitKey += "_" + entry.Value - } - } - return rateLimitKey -} - func (this *rateLimitConfigImpl) Dump() string { ret := "" for _, domain := range this.domains { @@ -267,13 +236,12 @@ func (this *rateLimitConfigImpl) GetLimit( } if descriptor.GetLimit() != nil { - rateLimitKey := domain + "." 
+ this.descriptorToKey(descriptor) + rateLimitKey := stat.DescriptorKey(domain, descriptor) rateLimitOverrideUnit := pb.RateLimitResponse_RateLimit_Unit(descriptor.GetLimit().GetUnit()) rateLimit = NewRateLimit( descriptor.GetLimit().GetRequestsPerUnit(), rateLimitOverrideUnit, - rateLimitKey, - this.statsScope) + this.manager.NewStats(rateLimitKey)) return rateLimit } @@ -315,9 +283,9 @@ func (this *rateLimitConfigImpl) GetLimit( // @param stats supplies the stats scope to use for limit stats during runtime. // @return a new config. func NewRateLimitConfigImpl( - configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig { + configs []RateLimitConfigToLoad, manager stat.Manager) RateLimitConfig { - ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsScope} + ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, manager} for _, config := range configs { ret.loadConfig(config) } @@ -328,9 +296,9 @@ func NewRateLimitConfigImpl( type rateLimitConfigLoaderImpl struct{} func (this *rateLimitConfigLoaderImpl) Load( - configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig { + configs []RateLimitConfigToLoad, manager stat.Manager) RateLimitConfig { - return NewRateLimitConfigImpl(configs, statsScope) + return NewRateLimitConfigImpl(configs, manager) } // @return a new default config loader implementation. diff --git a/src/config_check_cmd/main.go b/src/config_check_cmd/main.go index f9f3c742..b122649d 100644 --- a/src/config_check_cmd/main.go +++ b/src/config_check_cmd/main.go @@ -3,6 +3,7 @@ package main import ( "flag" "fmt" + stat "github.com/envoyproxy/ratelimit/src/stats" "io/ioutil" "os" "path/filepath" @@ -21,7 +22,8 @@ func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { }() dummyStats := stats.NewStore(stats.NewNullSink(), false) - config.NewRateLimitConfigImpl(allConfigs, dummyStats) + manager := stat.NewStatManager(dummyStats, false) + config.NewRateLimitConfigImpl(allConfigs, manager) } func main() { diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index 4a9aec1d..30214696 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -47,7 +47,7 @@ func (this *BaseRateLimiter) GenerateCacheKeys(request *pb.RateLimitRequest, cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey(request.Domain, request.Descriptors[i], limits[i], now) // Increase statistics for limits hit by their respective requests. if limits[i] != nil { - limits[i].Stats.TotalHits.Add(uint64(hitsAddend)) + limits[i].Stats.AddTotalHits(uint64(hitsAddend)) } } return cacheKeys @@ -74,8 +74,8 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * nil, 0) } if isOverLimitWithLocalCache { - limitInfo.limit.Stats.OverLimit.Add(uint64(hitsAddend)) - limitInfo.limit.Stats.OverLimitWithLocalCache.Add(uint64(hitsAddend)) + limitInfo.limit.Stats.AddOverLimit(uint64(hitsAddend)) + limitInfo.limit.Stats.AddOverLimitWithLocalCache(uint64(hitsAddend)) return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, limitInfo.limit.Limit, 0) } @@ -133,13 +133,13 @@ func checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) { // Otherwise, only the difference between the current limit value and the over limit threshold // were over limit hits. 
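As a concrete walk-through of this accounting (illustrative numbers): with a limit of 5 per second, the over limit threshold is 5 and, at the 0.8 near limit ratio, the near limit threshold is 4. A counter at 2 that receives 5 hits ends at 7: 2 hits (5 to 7) are over limit, 1 hit (4 to 5) is near limit, and the remaining 2 (2 to 4) are plain hits. The base limiter tests later in this series exercise exactly these numbers via NewRateLimitInfo(limits[0], 2, 7, 4, 5).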
if limitInfo.limitBeforeIncrease >= limitInfo.overLimitThreshold { - limitInfo.limit.Stats.OverLimit.Add(uint64(hitsAddend)) + limitInfo.limit.Stats.AddOverLimit(uint64(hitsAddend)) } else { - limitInfo.limit.Stats.OverLimit.Add(uint64(limitInfo.limitAfterIncrease - limitInfo.overLimitThreshold)) + limitInfo.limit.Stats.AddOverLimit(uint64(limitInfo.limitAfterIncrease - limitInfo.overLimitThreshold)) // If the limit before increase was below the over limit value, then some of the hits were // in the near limit range. - limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.overLimitThreshold - + limitInfo.limit.Stats.AddNearLimit(uint64(limitInfo.overLimitThreshold - utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease))) } } @@ -151,9 +151,9 @@ func checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) { // only the difference between the current limit value and the near limit threshold were near // limit hits. if limitInfo.limitBeforeIncrease >= limitInfo.nearLimitThreshold { - limitInfo.limit.Stats.NearLimit.Add(uint64(hitsAddend)) + limitInfo.limit.Stats.AddNearLimit(uint64(hitsAddend)) } else { - limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.limitAfterIncrease - limitInfo.nearLimitThreshold)) + limitInfo.limit.Stats.AddNearLimit(uint64(limitInfo.limitAfterIncrease - limitInfo.nearLimitThreshold)) } } } diff --git a/src/stats/manager.go b/src/stats/manager.go new file mode 100644 index 00000000..b5d17211 --- /dev/null +++ b/src/stats/manager.go @@ -0,0 +1,13 @@ +package stats + +import ( + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" +) + +type Manager interface { + AddTotalHits(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor) + AddOverLimit(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor) + AddNearLimit(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor) + AddOverLimitWithLocalCache(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor) + NewStats(key string) RateLimitStats +} diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go new file mode 100644 index 00000000..e9780580 --- /dev/null +++ b/src/stats/manager_impl.go @@ -0,0 +1,102 @@ +package stats + +import ( + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + st "github.com/lyft/gostats" + logger "github.com/sirupsen/logrus" +) + +func NewStatManager(scope st.Scope, detailedMetrics bool) *ManagerImpl { + return &ManagerImpl{ + scope: scope, + detailed: detailedMetrics, + } +} + +type ManagerImpl struct { + scope st.Scope + detailed bool +} + +func (this *ManagerImpl) AddTotalHits(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor) { + rlStats.TotalHits.Add(u) + if this.detailed { + stat := this.getDescriptorStat(domain, descriptor) + stat.OverLimit.Add(u) + } +} + +func (this *ManagerImpl) AddOverLimit(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor) { + rlStats.OverLimit.Add(u) + if this.detailed { + stat := this.getDescriptorStat(domain, descriptor) + stat.OverLimit.Add(u) + } +} + +func (this *ManagerImpl) AddNearLimit(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor) { + rlStats.NearLimit.Add(u) + if this.detailed { + stat := this.getDescriptorStat(domain, descriptor) + stat.OverLimit.Add(u) + } +} + +func (this 
*ManagerImpl) AddOverLimitWithLocalCache(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor) {
+	rlStats.OverLimitWithLocalCache.Add(u)
+	if this.detailed {
+		stat := this.getDescriptorStat(domain, descriptor)
+		stat.OverLimit.Add(u)
+	}
+}
+
+//todo: consider adding a ratelimitstats cache
+//todo: add descriptor fields parameter to allow configuration of descriptor entries for which metrics will be emitted.
+func (this *ManagerImpl) getDescriptorStat(domain string, descriptor *pb_struct.RateLimitDescriptor) RateLimitStats {
+	key := DescriptorKey(domain, descriptor)
+	ret := this.NewStats(key)
+	return ret
+}
+
+// Create new rate descriptor stats for a descriptor tuple.
+// @param statsScope supplies the owning scope.
+// @param key supplies the fully resolved descriptor tuple.
+// @return new stats.
+func (this *ManagerImpl) NewStats(key string) RateLimitStats {
+	ret := RateLimitStats{}
+	logger.Debugf("outputting test stats %s", key)
+	ret.Key = key
+	ret.TotalHits = this.scope.NewCounter(key + ".detailed_total_hits")
+	ret.OverLimit = this.scope.NewCounter(key + ".detailed_over_limit")
+	ret.NearLimit = this.scope.NewCounter(key + ".detailed_near_limit")
+	ret.OverLimitWithLocalCache = this.scope.NewCounter(key + ".detailed_over_limit_with_local_cache")
+	return ret
+}
+
+func DescriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) string {
+	rateLimitKey := ""
+	for _, entry := range descriptor.Entries {
+		if rateLimitKey != "" {
+			rateLimitKey += "."
+		}
+		rateLimitKey += entry.Key
+		if entry.Value != "" {
+			rateLimitKey += "_" + entry.Value
+		}
+	}
+	return domain + "." + rateLimitKey
+}
+
+
+// Stats for an individual rate limit config entry.
+//todo: unexport fields
+type RateLimitStats struct {
+	Key string
+	TotalHits st.Counter
+	OverLimit st.Counter
+	NearLimit st.Counter
+	OverLimitWithLocalCache st.Counter
+}
+func (this RateLimitStats) String() string {
+	return this.Key
+}
\ No newline at end of file

From 4bd3fb17da77a1e9a16900461284456924aff4fe Mon Sep 17 00:00:00 2001
From: Pablo Radnic
Date: Fri, 12 Feb 2021 17:06:43 -0300
Subject: [PATCH 02/26] stats manager usage

---
 src/config/config_impl.go | 2 +-
 src/limiter/base_limiter.go | 34 ++++++++++++++++++--------------
 src/stats/manager.go | 12 ++++--------
 src/stats/manager_impl.go | 25 ++++++++++++-------------
 4 files changed, 37 insertions(+), 36 deletions(-)

diff --git a/src/config/config_impl.go b/src/config/config_impl.go
index aae6e92e..433d95a0 100644
--- a/src/config/config_impl.go
+++ b/src/config/config_impl.go
@@ -60,7 +60,7 @@ var validKeys = map[string]bool{
 // @param scope supplies the owning scope.
 // @return the new config entry.
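With the new signature the config entry derives FullKey from the stats object rather than a separate key argument; a minimal sketch of the resulting construction path (the key is illustrative):

	rlStats := manager.NewStats("test-domain.key2_value2")
	rl := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, rlStats)
	// rl.FullKey == rlStats.String() == "test-domain.key2_value2"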
func NewRateLimit( - requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats RateLimitStats) *RateLimit { + requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stat.RateLimitStats) *RateLimit { return &RateLimit{FullKey: rlStats.String(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}} } diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index 30214696..6d48b79d 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -5,6 +5,7 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/stats" "github.com/envoyproxy/ratelimit/src/utils" logger "github.com/sirupsen/logrus" "math" @@ -18,6 +19,7 @@ type BaseRateLimiter struct { cacheKeyGenerator CacheKeyGenerator localCache *freecache.Cache nearLimitRatio float32 + Manager stats.Manager } type LimitInfo struct { @@ -47,7 +49,7 @@ func (this *BaseRateLimiter) GenerateCacheKeys(request *pb.RateLimitRequest, cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey(request.Domain, request.Descriptors[i], limits[i], now) // Increase statistics for limits hit by their respective requests. if limits[i] != nil { - limits[i].Stats.AddTotalHits(uint64(hitsAddend)) + this.Manager.AddTotalHits(uint64(hitsAddend),limits[i].Stats, cacheKeys[i].Key) } } return cacheKeys @@ -74,8 +76,8 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * nil, 0) } if isOverLimitWithLocalCache { - limitInfo.limit.Stats.AddOverLimit(uint64(hitsAddend)) - limitInfo.limit.Stats.AddOverLimitWithLocalCache(uint64(hitsAddend)) + this.Manager.AddOverLimit(uint64(hitsAddend),limitInfo.limit.Stats,key) + this.Manager.AddOverLimitWithLocalCache(uint64(hitsAddend),limitInfo.limit.Stats,key) return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, limitInfo.limit.Limit, 0) } @@ -89,7 +91,7 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, limitInfo.limit.Limit, 0) - checkOverLimitThreshold(limitInfo, hitsAddend) + this.checkOverLimitThreshold(limitInfo, hitsAddend, key) if this.localCache != nil { // Set the TTL of the local_cache to be the entire duration. @@ -109,13 +111,13 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * limitInfo.limit.Limit, limitInfo.overLimitThreshold-limitInfo.limitAfterIncrease) // The limit is OK but we additionally want to know if we are near the limit. 
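When the local cache already marks a key over limit, both counters above advance through the manager without touching the backend. A sketch of what one such call does to the underlying store, assuming the manager was built with detailed metrics enabled (the key shown is an illustrative cache key, which at this point in the series includes the time bucket):

	this.Manager.AddOverLimit(2, limitInfo.limit.Stats, "domain_key_value_1234")
	// bumps limitInfo.limit.Stats.OverLimit by 2, and in detailed mode also
	// mints and bumps the counter "domain_key_value_1234.detailed_over_limit"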
-		checkNearLimitThreshold(limitInfo, hitsAddend)
+		this.checkNearLimitThreshold(limitInfo, hitsAddend, key)
 	}

 	return responseDescriptorStatus
 }

 func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64,
-	localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) *BaseRateLimiter {
+	localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, manager stats.Manager) *BaseRateLimiter {
 	return &BaseRateLimiter{
 		timeSource:        timeSource,
 		JitterRand:        jitterRand,
@@ -123,37 +125,41 @@ func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expira
 		cacheKeyGenerator: NewCacheKeyGenerator(cacheKeyPrefix),
 		localCache:        localCache,
 		nearLimitRatio:    nearLimitRatio,
+		Manager:           manager,
 	}
 }

-func checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
+func (this *BaseRateLimiter) checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32, key string) {
 	// Increase over limit statistics. Because we support += behavior for increasing the limit, we need to
 	// assess if the entire hitsAddend were over the limit. That is, if the limit's value before adding the
 	// N hits was over the limit, then all the N hits were over limit.
 	// Otherwise, only the difference between the current limit value and the over limit threshold
 	// were over limit hits.
 	if limitInfo.limitBeforeIncrease >= limitInfo.overLimitThreshold {
-		limitInfo.limit.Stats.AddOverLimit(uint64(hitsAddend))
+		this.Manager.AddOverLimit(uint64(hitsAddend), limitInfo.limit.Stats, key)
 	} else {
-		limitInfo.limit.Stats.AddOverLimit(uint64(limitInfo.limitAfterIncrease - limitInfo.overLimitThreshold))
+		this.Manager.AddOverLimit(uint64(limitInfo.limitAfterIncrease - limitInfo.overLimitThreshold), limitInfo.limit.Stats, key)

 		// If the limit before increase was below the over limit value, then some of the hits were
 		// in the near limit range.
-		limitInfo.limit.Stats.AddNearLimit(uint64(limitInfo.overLimitThreshold -
-			utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)))
+		this.Manager.AddNearLimit(
+			uint64(limitInfo.overLimitThreshold - utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)),
+			limitInfo.limit.Stats,
+			key,
+		)
 	}
 }

-func checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
+func (this *BaseRateLimiter) checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32, key string) {
 	if limitInfo.limitAfterIncrease > limitInfo.nearLimitThreshold {
 		// Here we also need to assess which portion of the hitsAddend were in the near limit range.
 		// If all the hits were over the nearLimitThreshold, then all hits are near limit. Otherwise,
 		// only the difference between the current limit value and the near limit threshold were near
 		// limit hits.
		if limitInfo.limitBeforeIncrease >= limitInfo.nearLimitThreshold {
-			limitInfo.limit.Stats.AddNearLimit(uint64(hitsAddend))
+			this.Manager.AddNearLimit(uint64(hitsAddend), limitInfo.limit.Stats, key)
 		} else {
-			limitInfo.limit.Stats.AddNearLimit(uint64(limitInfo.limitAfterIncrease - limitInfo.nearLimitThreshold))
+			this.Manager.AddNearLimit(uint64(limitInfo.limitAfterIncrease - limitInfo.nearLimitThreshold), limitInfo.limit.Stats, key)
 		}
 	}
 }
diff --git a/src/stats/manager.go b/src/stats/manager.go
index b5d17211..2a76c444 100644
--- a/src/stats/manager.go
+++ b/src/stats/manager.go
@@ -1,13 +1,9 @@
 package stats

-import (
-	pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
-)
-
 type Manager interface {
-	AddTotalHits(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor)
-	AddOverLimit(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor)
-	AddNearLimit(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor)
-	AddOverLimitWithLocalCache(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor)
+	AddTotalHits(u uint64, rlStats RateLimitStats, key string)
+	AddOverLimit(u uint64, rlStats RateLimitStats, key string)
+	AddNearLimit(u uint64, rlStats RateLimitStats, key string)
+	AddOverLimitWithLocalCache(u uint64, rlStats RateLimitStats, key string)
 	NewStats(key string) RateLimitStats
 }
diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go
index e9780580..56ddda5d 100644
--- a/src/stats/manager_impl.go
+++ b/src/stats/manager_impl.go
@@ -18,42 +18,41 @@ type ManagerImpl struct {
 	detailed bool
 }

-func (this *ManagerImpl) AddTotalHits(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor) {
+func (this *ManagerImpl) AddTotalHits(u uint64, rlStats RateLimitStats, key string) {
 	rlStats.TotalHits.Add(u)
 	if this.detailed {
-		stat := this.getDescriptorStat(domain, descriptor)
-		stat.OverLimit.Add(u)
+		stat := this.getDescriptorStat(key)
+		stat.TotalHits.Add(u)
 	}
 }

-func (this *ManagerImpl) AddOverLimit(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor) {
+func (this *ManagerImpl) AddOverLimit(u uint64, rlStats RateLimitStats, key string) {
 	rlStats.OverLimit.Add(u)
 	if this.detailed {
-		stat := this.getDescriptorStat(domain, descriptor)
+		stat := this.getDescriptorStat(key)
 		stat.OverLimit.Add(u)
 	}
 }

-func (this *ManagerImpl) AddNearLimit(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor) {
+func (this *ManagerImpl) AddNearLimit(u uint64, rlStats RateLimitStats, key string) {
 	rlStats.NearLimit.Add(u)
 	if this.detailed {
-		stat := this.getDescriptorStat(domain, descriptor)
-		stat.OverLimit.Add(u)
+		stat := this.getDescriptorStat(key)
+		stat.NearLimit.Add(u)
 	}
 }

-func (this *ManagerImpl) AddOverLimitWithLocalCache(u uint64, rlStats RateLimitStats, domain string, descriptor *pb_struct.RateLimitDescriptor) {
+func (this *ManagerImpl) AddOverLimitWithLocalCache(u uint64, rlStats RateLimitStats, key string) {
 	rlStats.OverLimitWithLocalCache.Add(u)
 	if this.detailed {
-		stat := this.getDescriptorStat(domain, descriptor)
-		stat.OverLimit.Add(u)
+		stat := this.getDescriptorStat(key)
+		stat.OverLimitWithLocalCache.Add(u)
 	}
 }

 //todo: consider adding a ratelimitstats cache
 //todo: add descriptor fields parameter to allow configuration of descriptor entries for which metrics will be emitted.
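On the first todo above: getDescriptorStat calls NewStats on every detailed increment, so each hit re-resolves four counters by name in the gostats scope. One possible shape for the suggested cache, sketched here (not part of the patch) with a sync.Map field assumed on ManagerImpl because DoLimit calls arrive concurrently:

	// assumed extra field on ManagerImpl: cache sync.Map (import "sync"),
	// mapping fully resolved key -> RateLimitStats
	func (this *ManagerImpl) getDescriptorStat(key string) RateLimitStats {
		if v, ok := this.cache.Load(key); ok {
			return v.(RateLimitStats) // reuse previously minted counters
		}
		ret := this.NewStats(key)
		this.cache.Store(key, ret)
		return ret
	}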
-func (this *ManagerImpl) getDescriptorStat(domain string, descriptor *pb_struct.RateLimitDescriptor) RateLimitStats { - key := DescriptorKey(domain, descriptor) +func (this *ManagerImpl) getDescriptorStat(key string) RateLimitStats { ret := this.NewStats(key) return ret } From 6b6f7b385e53c1eb66fe2ee13cecd219a259241f Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Wed, 17 Feb 2021 12:06:54 -0300 Subject: [PATCH 03/26] detailed metrics in settings --- src/config_check_cmd/main.go | 5 +++-- src/memcached/cache_impl.go | 9 +++++---- src/redis/cache_impl.go | 4 +++- src/redis/fixed_cache_impl.go | 5 +++-- src/service/ratelimit.go | 2 +- src/service_cmd/runner/runner.go | 8 ++++++-- src/settings/settings.go | 3 +++ 7 files changed, 24 insertions(+), 12 deletions(-) diff --git a/src/config_check_cmd/main.go b/src/config_check_cmd/main.go index b122649d..e29fcea6 100644 --- a/src/config_check_cmd/main.go +++ b/src/config_check_cmd/main.go @@ -3,6 +3,7 @@ package main import ( "flag" "fmt" + "github.com/envoyproxy/ratelimit/src/settings" stat "github.com/envoyproxy/ratelimit/src/stats" "io/ioutil" "os" @@ -20,9 +21,9 @@ func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { os.Exit(1) } }() - + settingStruct := settings.NewSettings() dummyStats := stats.NewStore(stats.NewNullSink(), false) - manager := stat.NewStatManager(dummyStats, false) + manager := stat.NewStatManager(dummyStats, settingStruct.DetailedMetrics) config.NewRateLimitConfigImpl(allConfigs, manager) } diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 892565e8..90b397c8 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -17,6 +17,7 @@ package memcached import ( "context" + stat "github.com/envoyproxy/ratelimit/src/stats" "math/rand" "strconv" "sync" @@ -174,7 +175,7 @@ func (this *rateLimitMemcacheImpl) Flush() { } func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand, - expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { + expirationJitterMaxSeconds int64, localCache *freecache.Cache, manager stat.Manager, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { return &rateLimitMemcacheImpl{ client: client, timeSource: timeSource, @@ -182,19 +183,19 @@ func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRan expirationJitterMaxSeconds: expirationJitterMaxSeconds, localCache: localCache, nearLimitRatio: nearLimitRatio, - baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix), + baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, manager), } } func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand, - localCache *freecache.Cache, scope stats.Scope) limiter.RateLimitCache { + localCache *freecache.Cache, scope stats.Scope, manager stat.Manager) limiter.RateLimitCache { return NewRateLimitCacheImpl( CollectStats(memcache.New(s.MemcacheHostPort), scope.Scope("memcache")), timeSource, jitterRand, s.ExpirationJitterMaxSeconds, localCache, - scope, + manager, s.NearLimitRatio, s.CacheKeyPrefix, ) diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 7e619b66..36efd85b 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -1,6 +1,7 @@ package redis 
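From here the manager is threaded from the runner into each cache backend alongside the existing settings. A sketch of the wiring this patch sets up, mirroring the createLimiter changes further down (variable names are illustrative):

	manager := stat.NewStatManager(srv.Scope(), s.DetailedMetrics) // s is settings.Settings
	cache := redis.NewRateLimiterCacheImplFromSettings(
		s, localCache, srv, utils.NewTimeSourceImpl(),
		rand.New(utils.NewLockedSource(time.Now().Unix())),
		s.ExpirationJitterMaxSeconds, manager)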
import ( + stat "github.com/envoyproxy/ratelimit/src/stats" "math/rand" "github.com/coocood/freecache" @@ -10,7 +11,7 @@ import ( "github.com/envoyproxy/ratelimit/src/utils" ) -func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) limiter.RateLimitCache { +func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, manager stat.Manager) limiter.RateLimitCache { var perSecondPool Client if s.RedisPerSecond { perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, @@ -29,5 +30,6 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca localCache, s.NearLimitRatio, s.CacheKeyPrefix, + manager, ) } diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index b2b3d3d2..387ad727 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -1,6 +1,7 @@ package redis import ( + stat "github.com/envoyproxy/ratelimit/src/stats" "math/rand" "github.com/coocood/freecache" @@ -107,10 +108,10 @@ func (this *fixedRateLimitCacheImpl) DoLimit( func (this *fixedRateLimitCacheImpl) Flush() {} func NewFixedRateLimitCacheImpl(client Client, perSecondClient Client, timeSource utils.TimeSource, - jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { + jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, manager stat.Manager) limiter.RateLimitCache { return &fixedRateLimitCacheImpl{ client: client, perSecondClient: perSecondClient, - baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix), + baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, manager), } } diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 126bb776..643c3e16 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -84,7 +84,7 @@ func (this *service) reloadConfig() { files = append(files, config.RateLimitConfigToLoad{key, snapshot.Get(key)}) } - newConfig := this.configLoader.Load(files, this.rlStatsScope) + newConfig := this.configLoader.Load(files, ) this.stats.configLoadSuccess.Inc() this.configLock.Lock() this.config = newConfig diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index afa1b144..14035134 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -1,6 +1,7 @@ package runner import ( + stats2 "github.com/envoyproxy/ratelimit/src/stats" "io" "math/rand" "net/http" @@ -45,6 +46,7 @@ func (runner *Runner) GetStatsStore() stats.Store { } func createLimiter(srv server.Server, s settings.Settings, localCache *freecache.Cache) limiter.RateLimitCache { + manager := stats2.NewStatManager(srv.Scope(), s.DetailedMetrics) switch s.BackendType { case "redis", "": return redis.NewRateLimiterCacheImplFromSettings( @@ -53,14 +55,16 @@ func createLimiter(srv server.Server, s settings.Settings, localCache *freecache srv, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), - 
s.ExpirationJitterMaxSeconds) + s.ExpirationJitterMaxSeconds, + manager) case "memcache": return memcached.NewRateLimitCacheImplFromSettings( s, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), localCache, - srv.Scope()) + srv.Scope(), + manager) default: logger.Fatalf("Invalid setting for BackendType: %s", s.BackendType) panic("This line should not be reachable") diff --git a/src/settings/settings.go b/src/settings/settings.go index 8468076e..b91eb0e1 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -59,6 +59,9 @@ type Settings struct { // Memcache settings MemcacheHostPort string `envconfig:"MEMCACHE_HOST_PORT" default:""` + + //Detailed Metrics Mode + DetailedMetrics bool `envconfig:"DETAILED_METRICS_MODE" default:"false"` } type Option func(*Settings) From 73b1a4d3a160ce5dbb7f23c8a84891dc30ac7b9b Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Wed, 17 Feb 2021 15:36:58 -0300 Subject: [PATCH 04/26] move more stats responsibility to manager --- src/service/ratelimit.go | 56 ++++++++--------------------- src/service/ratelimit_legacy.go | 25 +++---------- src/service_cmd/runner/runner.go | 5 ++- src/stats/manager.go | 3 ++ src/stats/manager_impl.go | 60 +++++++++++++++++++++++++++----- test/mocks/stats/manager.go | 1 + 6 files changed, 79 insertions(+), 71 deletions(-) create mode 100644 test/mocks/stats/manager.go diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 643c3e16..f0eecbf6 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -2,6 +2,7 @@ package ratelimit import ( "fmt" + stats2 "github.com/envoyproxy/ratelimit/src/stats" "strings" "sync" @@ -11,37 +12,10 @@ import ( "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" "github.com/lyft/goruntime/loader" - stats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" ) -type shouldRateLimitStats struct { - redisError stats.Counter - serviceError stats.Counter -} - -func newShouldRateLimitStats(scope stats.Scope) shouldRateLimitStats { - ret := shouldRateLimitStats{} - ret.redisError = scope.NewCounter("redis_error") - ret.serviceError = scope.NewCounter("service_error") - return ret -} - -type serviceStats struct { - configLoadSuccess stats.Counter - configLoadError stats.Counter - shouldRateLimit shouldRateLimitStats -} - -func newServiceStats(scope stats.Scope) serviceStats { - ret := serviceStats{} - ret.configLoadSuccess = scope.NewCounter("config_load_success") - ret.configLoadError = scope.NewCounter("config_load_error") - ret.shouldRateLimit = newShouldRateLimitStats(scope.Scope("call.should_rate_limit")) - return ret -} - type RateLimitServiceServer interface { pb.RateLimitServiceServer GetCurrentConfig() config.RateLimitConfig @@ -55,13 +29,12 @@ type service struct { config config.RateLimitConfig runtimeUpdateEvent chan int cache limiter.RateLimitCache - stats serviceStats - rlStatsScope stats.Scope + stats stats2.ServiceStats legacy *legacyService runtimeWatchRoot bool } -func (this *service) reloadConfig() { +func (this *service) reloadConfig(manager stats2.Manager) { defer func() { if e := recover(); e != nil { configError, ok := e.(config.RateLimitConfigError) @@ -69,7 +42,7 @@ func (this *service) reloadConfig() { panic(e) } - this.stats.configLoadError.Inc() + this.stats.ConfigLoadError.Inc() logger.Errorf("error loading new configuration from runtime: %s", configError.Error()) } }() @@ -84,8 +57,8 @@ func (this *service) reloadConfig() { files = 
append(files, config.RateLimitConfigToLoad{key, snapshot.Get(key)}) } - newConfig := this.configLoader.Load(files, ) - this.stats.configLoadSuccess.Inc() + newConfig := this.configLoader.Load(files, manager) + this.stats.ConfigLoadSuccess.Inc() this.configLock.Lock() this.config = newConfig this.configLock.Unlock() @@ -170,12 +143,12 @@ func (this *service) ShouldRateLimit( switch t := err.(type) { case redis.RedisError: { - this.stats.shouldRateLimit.redisError.Inc() + this.stats.ShouldRateLimit.RedisError.Inc() finalError = t } case serviceError: { - this.stats.shouldRateLimit.serviceError.Inc() + this.stats.ShouldRateLimit.ServiceError.Inc() finalError = t } default: @@ -197,9 +170,9 @@ func (this *service) GetCurrentConfig() config.RateLimitConfig { defer this.configLock.RUnlock() return this.config } - +//todo: add methods to interface func NewService(runtime loader.IFace, cache limiter.RateLimitCache, - configLoader config.RateLimitConfigLoader, stats stats.Scope, runtimeWatchRoot bool) RateLimitServiceServer { + configLoader config.RateLimitConfigLoader, manager stats2.Manager, runtimeWatchRoot bool) RateLimitServiceServer { newService := &service{ runtime: runtime, @@ -208,25 +181,24 @@ func NewService(runtime loader.IFace, cache limiter.RateLimitCache, config: nil, runtimeUpdateEvent: make(chan int), cache: cache, - stats: newServiceStats(stats), - rlStatsScope: stats.Scope("rate_limit"), + stats: manager.NewServiceStats(), runtimeWatchRoot: runtimeWatchRoot, } newService.legacy = &legacyService{ s: newService, - shouldRateLimitLegacyStats: newShouldRateLimitLegacyStats(stats), + shouldRateLimitLegacyStats: manager.NewShouldRateLimitLegacyStats(), } runtime.AddUpdateCallback(newService.runtimeUpdateEvent) - newService.reloadConfig() + newService.reloadConfig(manager) go func() { // No exit right now. for { logger.Debugf("waiting for runtime update") <-newService.runtimeUpdateEvent logger.Debugf("got runtime update and reloading config") - newService.reloadConfig() + newService.reloadConfig(manager) } }() diff --git a/src/service/ratelimit_legacy.go b/src/service/ratelimit_legacy.go index 17112675..8a420fd6 100644 --- a/src/service/ratelimit_legacy.go +++ b/src/service/ratelimit_legacy.go @@ -5,7 +5,7 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/lyft/gostats" + stats2 "github.com/envoyproxy/ratelimit/src/stats" "golang.org/x/net/context" ) @@ -17,22 +17,7 @@ type RateLimitLegacyServiceServer interface { // the legacyService receives RateLimitRequests, converts the request, and calls the service's ShouldRateLimit method. 
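The service and legacy-endpoint counter bundles now come from the manager as well, replacing the unexported scope helpers deleted above. A short sketch of what the manager hands back, with counter names as defined in manager_impl.go later in this patch:

	svcStats := manager.NewServiceStats()
	svcStats.ConfigLoadSuccess.Inc() // "config_load_success" on the manager's scope
	legacyStats := manager.NewShouldRateLimitLegacyStats()
	legacyStats.ReqConversionError.Inc() // "call.should_rate_limit_legacy.req_conversion_error"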
type legacyService struct { s *service - shouldRateLimitLegacyStats shouldRateLimitLegacyStats -} - -type shouldRateLimitLegacyStats struct { - reqConversionError stats.Counter - respConversionError stats.Counter - shouldRateLimitError stats.Counter -} - -func newShouldRateLimitLegacyStats(scope stats.Scope) shouldRateLimitLegacyStats { - s := scope.Scope("call.should_rate_limit_legacy") - return shouldRateLimitLegacyStats{ - reqConversionError: s.NewCounter("req_conversion_error"), - respConversionError: s.NewCounter("resp_conversion_error"), - shouldRateLimitError: s.NewCounter("should_rate_limit_error"), - } + shouldRateLimitLegacyStats stats2.ShouldRateLimitLegacyStats } func (this *legacyService) ShouldRateLimit( @@ -41,18 +26,18 @@ func (this *legacyService) ShouldRateLimit( request, err := ConvertLegacyRequest(legacyRequest) if err != nil { - this.shouldRateLimitLegacyStats.reqConversionError.Inc() + this.shouldRateLimitLegacyStats.ReqConversionError.Inc() return nil, err } resp, err := this.s.ShouldRateLimit(ctx, request) if err != nil { - this.shouldRateLimitLegacyStats.shouldRateLimitError.Inc() + this.shouldRateLimitLegacyStats.ShouldRateLimitError.Inc() return nil, err } legacyResponse, err := ConvertResponse(resp) if err != nil { - this.shouldRateLimitLegacyStats.respConversionError.Inc() + this.shouldRateLimitLegacyStats.RespConversionError.Inc() return nil, err } diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 14035134..c4c9ac17 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -100,11 +100,14 @@ func (runner *Runner) Run() { runner.srv = srv runner.mu.Unlock() + + manager := stats2.NewStatManager(srv.Scope().Scope("service"), s.DetailedMetrics) + service := ratelimit.NewService( srv.Runtime(), createLimiter(srv, s, localCache), config.NewRateLimitConfigLoaderImpl(), - srv.Scope().Scope("service"), + manager, s.RuntimeWatchRoot, ) diff --git a/src/stats/manager.go b/src/stats/manager.go index 2a76c444..bdd22512 100644 --- a/src/stats/manager.go +++ b/src/stats/manager.go @@ -6,4 +6,7 @@ type Manager interface { AddNearLimit(u uint64, rlStats RateLimitStats, key string) AddOverLimitWithLocalCache(u uint64, rlStats RateLimitStats, key string) NewStats(key string) RateLimitStats + NewShouldRateLimitStats() ShouldRateLimitStats + NewServiceStats() ServiceStats + NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats } diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index 56ddda5d..f440bd22 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -2,11 +2,11 @@ package stats import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" - st "github.com/lyft/gostats" + gostats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" ) -func NewStatManager(scope st.Scope, detailedMetrics bool) *ManagerImpl { +func NewStatManager(scope gostats.Scope, detailedMetrics bool) *ManagerImpl { return &ManagerImpl{ scope: scope, detailed: detailedMetrics, @@ -14,7 +14,7 @@ func NewStatManager(scope st.Scope, detailedMetrics bool) *ManagerImpl { } type ManagerImpl struct { - scope st.Scope + scope gostats.Scope detailed bool } @@ -72,6 +72,50 @@ func (this *ManagerImpl) NewStats(key string) RateLimitStats { return ret } + +type ShouldRateLimitLegacyStats struct { + ReqConversionError gostats.Counter + RespConversionError gostats.Counter + ShouldRateLimitError gostats.Counter +} + + +func (this *ManagerImpl) 
NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats { + s := this.scope.Scope("call.should_rate_limit_legacy") + return ShouldRateLimitLegacyStats{ + ReqConversionError: s.NewCounter("req_conversion_error"), + RespConversionError: s.NewCounter("resp_conversion_error"), + ShouldRateLimitError: s.NewCounter("should_rate_limit_error"), + } +} + +type ShouldRateLimitStats struct { + RedisError gostats.Counter + ServiceError gostats.Counter +} + +func (this *ManagerImpl) NewShouldRateLimitStats() ShouldRateLimitStats { + s := this.scope.Scope("call.should_rate_limit") + ret := ShouldRateLimitStats{} + ret.RedisError = s.NewCounter("redis_error") + ret.ServiceError = s.NewCounter("service_error") + return ret +} + +type ServiceStats struct { + ConfigLoadSuccess gostats.Counter + ConfigLoadError gostats.Counter + ShouldRateLimit ShouldRateLimitStats +} + +func (this *ManagerImpl) NewServiceStats() ServiceStats { + ret := ServiceStats{} + ret.ConfigLoadSuccess = this.scope.NewCounter("config_load_success") + ret.ConfigLoadError = this.scope.NewCounter("config_load_error") + ret.ShouldRateLimit = this.NewShouldRateLimitStats() + return ret +} + func DescriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) string { rateLimitKey := "" for _, entry := range descriptor.Entries { @@ -90,11 +134,11 @@ func DescriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) str // Stats for an individual rate limit config entry. //todo: unexport fields type RateLimitStats struct { - Key string - TotalHits st.Counter - OverLimit st.Counter - NearLimit st.Counter - OverLimitWithLocalCache st.Counter + Key string + TotalHits gostats.Counter + OverLimit gostats.Counter + NearLimit gostats.Counter + OverLimitWithLocalCache gostats.Counter } func (this RateLimitStats) String() string { return this.Key diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go new file mode 100644 index 00000000..43b4fd56 --- /dev/null +++ b/test/mocks/stats/manager.go @@ -0,0 +1 @@ +package stats From 854c75eb8d85c4630bc042af9b9cc085a1fd7ac5 Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Wed, 17 Feb 2021 15:38:32 -0300 Subject: [PATCH 05/26] compiling tests --- test/config/config_test.go | 88 ++++++++++++++------------- test/limiter/base_limiter_test.go | 37 ++++++----- test/memcached/cache_impl_test.go | 53 +++++++++------- test/mocks/config/config.go | 4 +- test/mocks/stats/manager.go | 46 ++++++++++++++ test/redis/bench_test.go | 6 +- test/redis/fixed_cache_impl_test.go | 45 ++++++++------ test/service/ratelimit_legacy_test.go | 28 ++++----- test/service/ratelimit_test.go | 30 +++++---- 9 files changed, 206 insertions(+), 131 deletions(-) diff --git a/test/config/config_test.go b/test/config/config_test.go index 96638165..429bc590 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -9,6 +9,7 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" pb_type "github.com/envoyproxy/go-control-plane/envoy/type/v3" "github.com/envoyproxy/ratelimit/src/config" + mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats" "github.com/lyft/gostats" "github.com/stretchr/testify/assert" ) @@ -23,8 +24,8 @@ func loadFile(path string) []config.RateLimitConfigToLoad { func TestBasicConfig(t *testing.T) { assert := assert.New(t) - stats := stats.NewStore(stats.NewNullSink(), false) - rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), stats) + newStore := stats.NewStore(stats.NewNullSink(), false) + rlConfig := 
config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(newStore)) rlConfig.Dump() assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{})) assert.Nil(rlConfig.GetLimit(nil, "test-domain", &pb_struct.RateLimitDescriptor{})) @@ -67,9 +68,9 @@ func TestBasicConfig(t *testing.T) { rl.Stats.NearLimit.Inc() assert.EqualValues(5, rl.Limit.RequestsPerUnit) assert.Equal(pb.RateLimitResponse_RateLimit_SECOND, rl.Limit.Unit) - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1.total_hits").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1.over_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1.near_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1.total_hits").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1.over_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1.near_limit").Value()) rl = rlConfig.GetLimit( nil, "test-domain", @@ -82,11 +83,11 @@ func TestBasicConfig(t *testing.T) { assert.EqualValues(10, rl.Limit.RequestsPerUnit) assert.Equal(pb.RateLimitResponse_RateLimit_SECOND, rl.Limit.Unit) assert.EqualValues( - 1, stats.NewCounter("test-domain.key1_value1.subkey1_subvalue1.total_hits").Value()) + 1, newStore.NewCounter("test-domain.key1_value1.subkey1_subvalue1.total_hits").Value()) assert.EqualValues( - 1, stats.NewCounter("test-domain.key1_value1.subkey1_subvalue1.over_limit").Value()) + 1, newStore.NewCounter("test-domain.key1_value1.subkey1_subvalue1.over_limit").Value()) assert.EqualValues( - 1, stats.NewCounter("test-domain.key1_value1.subkey1_subvalue1.near_limit").Value()) + 1, newStore.NewCounter("test-domain.key1_value1.subkey1_subvalue1.near_limit").Value()) rl = rlConfig.GetLimit( nil, "test-domain", @@ -98,9 +99,9 @@ func TestBasicConfig(t *testing.T) { rl.Stats.NearLimit.Inc() assert.EqualValues(20, rl.Limit.RequestsPerUnit) assert.Equal(pb.RateLimitResponse_RateLimit_MINUTE, rl.Limit.Unit) - assert.EqualValues(1, stats.NewCounter("test-domain.key2.total_hits").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key2.over_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key2.near_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key2.total_hits").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key2.over_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key2.near_limit").Value()) rl = rlConfig.GetLimit( nil, "test-domain", @@ -112,9 +113,9 @@ func TestBasicConfig(t *testing.T) { rl.Stats.NearLimit.Inc() assert.EqualValues(30, rl.Limit.RequestsPerUnit) assert.Equal(pb.RateLimitResponse_RateLimit_MINUTE, rl.Limit.Unit) - assert.EqualValues(1, stats.NewCounter("test-domain.key2_value2.total_hits").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key2_value2.over_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key2_value2.near_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key2_value2.total_hits").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key2_value2.over_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key2_value2.near_limit").Value()) rl = rlConfig.GetLimit( nil, "test-domain", @@ -133,9 +134,9 @@ func TestBasicConfig(t *testing.T) { rl.Stats.NearLimit.Inc() assert.EqualValues(1, 
rl.Limit.RequestsPerUnit) assert.Equal(pb.RateLimitResponse_RateLimit_HOUR, rl.Limit.Unit) - assert.EqualValues(1, stats.NewCounter("test-domain.key3.total_hits").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key3.over_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key3.near_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key3.total_hits").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key3.over_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key3.near_limit").Value()) rl = rlConfig.GetLimit( nil, "test-domain", @@ -147,15 +148,15 @@ func TestBasicConfig(t *testing.T) { rl.Stats.NearLimit.Inc() assert.EqualValues(1, rl.Limit.RequestsPerUnit) assert.Equal(pb.RateLimitResponse_RateLimit_DAY, rl.Limit.Unit) - assert.EqualValues(1, stats.NewCounter("test-domain.key4.total_hits").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key4.over_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key4.near_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key4.total_hits").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key4.over_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key4.near_limit").Value()) } func TestConfigLimitOverride(t *testing.T) { assert := assert.New(t) - stats := stats.NewStore(stats.NewNullSink(), false) - rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), stats) + newStore := stats.NewStore(stats.NewNullSink(), false) + rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(newStore)) rlConfig.Dump() // No matching domain assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{ @@ -179,11 +180,11 @@ func TestConfigLimitOverride(t *testing.T) { rl.Stats.TotalHits.Inc() rl.Stats.OverLimit.Inc() rl.Stats.NearLimit.Inc() - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something.total_hits").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something.over_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something.near_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1_something.total_hits").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1_something.over_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1_something.near_limit").Value()) - // Change in override value doesn't erase stats + // Change in override value doesn't erase newStore rl = rlConfig.GetLimit( nil, "test-domain", &pb_struct.RateLimitDescriptor{ @@ -200,9 +201,9 @@ func TestConfigLimitOverride(t *testing.T) { RequestsPerUnit: 42, Unit: pb.RateLimitResponse_RateLimit_HOUR, }, rl.Limit) - assert.EqualValues(2, stats.NewCounter("test-domain.key1_value1.subkey1_something.total_hits").Value()) - assert.EqualValues(2, stats.NewCounter("test-domain.key1_value1.subkey1_something.over_limit").Value()) - assert.EqualValues(2, stats.NewCounter("test-domain.key1_value1.subkey1_something.near_limit").Value()) + assert.EqualValues(2, newStore.NewCounter("test-domain.key1_value1.subkey1_something.total_hits").Value()) + assert.EqualValues(2, newStore.NewCounter("test-domain.key1_value1.subkey1_something.over_limit").Value()) + assert.EqualValues(2, 
newStore.NewCounter("test-domain.key1_value1.subkey1_something.near_limit").Value()) // Different value creates a different counter rl = rlConfig.GetLimit( @@ -221,9 +222,9 @@ func TestConfigLimitOverride(t *testing.T) { rl.Stats.TotalHits.Inc() rl.Stats.OverLimit.Inc() rl.Stats.NearLimit.Inc() - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something_else.total_hits").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something_else.over_limit").Value()) - assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something_else.near_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1_something_else.total_hits").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1_something_else.over_limit").Value()) + assert.EqualValues(1, newStore.NewCounter("test-domain.key1_value1.subkey1_something_else.near_limit").Value()) } func expectConfigPanic(t *testing.T, call func(), expectedError string) { @@ -242,7 +243,7 @@ func TestEmptyDomain(t *testing.T) { t, func() { config.NewRateLimitConfigImpl( - loadFile("empty_domain.yaml"), stats.NewStore(stats.NewNullSink(), false)) + loadFile("empty_domain.yaml"), mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "empty_domain.yaml: config file cannot have empty domain") } @@ -253,7 +254,7 @@ func TestDuplicateDomain(t *testing.T) { func() { files := loadFile("basic_config.yaml") files = append(files, loadFile("duplicate_domain.yaml")...) - config.NewRateLimitConfigImpl(files, stats.NewStore(stats.NewNullSink(), false)) + config.NewRateLimitConfigImpl(files, mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "duplicate_domain.yaml: duplicate domain 'test-domain' in config file") } @@ -264,7 +265,7 @@ func TestEmptyKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("empty_key.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "empty_key.yaml: descriptor has empty key") } @@ -275,7 +276,7 @@ func TestDuplicateKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("duplicate_key.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "duplicate_key.yaml: duplicate descriptor composite key 'test-domain.key1_value1'") } @@ -286,7 +287,7 @@ func TestBadLimitUnit(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("bad_limit_unit.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "bad_limit_unit.yaml: invalid rate limit unit 'foo'") } @@ -297,7 +298,7 @@ func TestBadYaml(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("bad_yaml.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "bad_yaml.yaml: error loading config file: yaml: line 2: found unexpected end of stream") } @@ -308,7 +309,7 @@ func TestMisspelledKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("misspelled_key.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "misspelled_key.yaml: config error, unknown key 'ratelimit'") @@ -317,7 +318,8 @@ func TestMisspelledKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( 
loadFile("misspelled_key2.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + }, "misspelled_key2.yaml: config error, unknown key 'requestsperunit'") } @@ -328,7 +330,7 @@ func TestNonStringKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("non_string_key.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "non_string_key.yaml: config error, key is not of type string: 0.25") } @@ -339,7 +341,7 @@ func TestNonMapList(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("non_map_list.yaml"), - stats.NewStore(stats.NewNullSink(), false)) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "non_map_list.yaml: config error, yaml file contains list of type other than map: a") -} +} \ No newline at end of file diff --git a/test/limiter/base_limiter_test.go b/test/limiter/base_limiter_test.go index 94b63d77..29bbc185 100644 --- a/test/limiter/base_limiter_test.go +++ b/test/limiter/base_limiter_test.go @@ -1,6 +1,7 @@ package limiter import ( + stats2 "github.com/envoyproxy/ratelimit/test/mocks/stats" "math/rand" "testing" @@ -22,10 +23,11 @@ func TestGenerateCacheKeys(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) jitterSource := mock_utils.NewMockJitterRandSource(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) + sm := stats2.NewMockStatManager(statsStore) timeSource.EXPECT().UnixNow().Return(int64(1234)) - baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "") + baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) assert.Equal(1, len(cacheKeys)) @@ -40,10 +42,11 @@ func TestGenerateCacheKeysPrefix(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) jitterSource := mock_utils.NewMockJitterRandSource(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) + sm := stats2.NewMockStatManager(statsStore) timeSource.EXPECT().UnixNow().Return(int64(1234)) - baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:") + baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) assert.Equal(1, len(cacheKeys)) @@ -57,7 +60,8 @@ func TestOverLimitWithLocalCache(t *testing.T) { defer controller.Finish() localCache := freecache.NewCache(100) localCache.Set([]byte("key"), []byte("value"), 100) - baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 
3600, localCache, 0.8, "") + sm := stats2.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)) + baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "", sm) // Returns true, as local cache contains over limit value for the key. assert.Equal(true, baseRateLimit.IsOverLimitWithLocalCache("key")) } @@ -66,11 +70,12 @@ func TestNoOverLimitWithLocalCache(t *testing.T) { assert := assert.New(t) controller := gomock.NewController(t) defer controller.Finish() - baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "") + sm := stats2.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)) + baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "", sm) // Returns false, as local cache is nil. assert.Equal(false, baseRateLimit.IsOverLimitWithLocalCache("domain_key_value_1234")) localCache := freecache.NewCache(100) - baseRateLimitWithLocalCache := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "") + baseRateLimitWithLocalCache := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "", sm) // Returns false, as local cache does not contain value for cache key. assert.Equal(false, baseRateLimitWithLocalCache.IsOverLimitWithLocalCache("domain_key_value_1234")) } @@ -79,7 +84,8 @@ func TestGetResponseStatusEmptyKey(t *testing.T) { assert := assert.New(t) controller := gomock.NewController(t) defer controller.Finish() - baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "") + sm := stats2.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)) + baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "", sm) responseStatus := baseRateLimit.GetResponseDescriptorStatus("", nil, false, 1) assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) assert.Equal(uint32(0), responseStatus.GetLimitRemaining()) @@ -92,8 +98,9 @@ func TestGetResponseStatusOverLimitWithLocalCache(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) timeSource.EXPECT().UnixNow().Return(int64(1234)) statsStore := stats.NewStore(stats.NewNullSink(), false) - baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "") - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + sm := stats2.NewMockStatManager(statsStore) + baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5) // As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits. 
responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2) @@ -112,8 +119,9 @@ func TestGetResponseStatusOverLimit(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)) statsStore := stats.NewStore(stats.NewNullSink(), false) localCache := freecache.NewCache(100) - baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "") - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + sm := stats2.NewMockStatManager(statsStore) + baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm) + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OVER_LIMIT, responseStatus.GetCode()) @@ -133,8 +141,9 @@ func TestGetResponseStatusBelowLimit(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) timeSource.EXPECT().UnixNow().Return(int64(1234)) statsStore := stats.NewStore(stats.NewNullSink(), false) - baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "") - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + sm := stats2.NewMockStatManager(statsStore) + baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go index 847b76f4..b0c15b44 100644 --- a/test/memcached/cache_impl_test.go +++ b/test/memcached/cache_impl_test.go @@ -5,6 +5,7 @@ package memcached_test import ( + stats2 "github.com/envoyproxy/ratelimit/test/mocks/stats" "math/rand" "strconv" "testing" @@ -34,7 +35,8 @@ func TestMemcached(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) client := mock_memcached.NewMockClient(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "") + sm := stats2.NewMockStatManager(statsStore) + cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "") timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) client.EXPECT().GetMulti([]string{"domain_key_value_1234"}).Return( @@ -43,7 +45,7 @@ func TestMemcached(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -66,7 +68,7 @@ func TestMemcached(t *testing.T) { }, 1) limits = 
[]*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2_subkey2_subvalue2", statsStore)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}}, @@ -95,8 +97,8 @@ func TestMemcached(t *testing.T) { {{"key3", "value3"}, {"subkey3", "subvalue3"}}, }, 1) limits = []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key3_value3", statsStore), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, "key3_value3_subkey3_subvalue3", statsStore)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3")), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}, @@ -120,7 +122,8 @@ func TestMemcachedGetError(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) client := mock_memcached.NewMockClient(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "") + sm := stats2.NewMockStatManager(statsStore) + cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "") timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) client.EXPECT().GetMulti([]string{"domain_key_value_1234"}).Return( @@ -129,7 +132,7 @@ func TestMemcachedGetError(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -146,7 +149,7 @@ func TestMemcachedGetError(t *testing.T) { client.EXPECT().Increment("domain_key_value1_1234", uint64(1)).Return(uint64(5), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value1"}}}, 1) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value1", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value1"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -204,7 +207,8 @@ func TestOverLimitWithLocalCache(t *testing.T) { localCache := freecache.NewCache(100) sink := &common.TestStatSink{} statsStore := stats.NewStore(sink, true) - cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, localCache, statsStore, 0.8, "") + sm := 
stats2.NewMockStatManager(statsStore) + cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, localCache, sm, 0.8, "") localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) // Test Near Limit Stats. Under Near Limit Ratio @@ -217,7 +221,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -296,7 +300,8 @@ func TestNearLimit(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) client := mock_memcached.NewMockClient(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "") + sm := stats2.NewMockStatManager(statsStore) + cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "") // Test Near Limit Stats. Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) @@ -308,7 +313,7 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -358,7 +363,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key5_value5_1234", uint64(3)).Return(uint64(5), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key5_value5", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -375,7 +380,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key6_value6_1234", uint64(2)).Return(uint64(7), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) - limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, "key6_value6", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -392,7 +397,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key7_value7_1234", uint64(3)).Return(uint64(19), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key7_value7", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"))} assert.Equal( 
[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -409,7 +414,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key8_value8_1234", uint64(3)).Return(uint64(22), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key8_value8", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -426,7 +431,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key9_value9_1234", uint64(7)).Return(uint64(22), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key9_value9", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -443,7 +448,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key10_value10_1234", uint64(3)).Return(uint64(30), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key10_value10", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -464,7 +469,8 @@ func TestMemcacheWithJitter(t *testing.T) { client := mock_memcached.NewMockClient(controller) jitterSource := mock_utils.NewMockJitterRandSource(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewRateLimitCacheImpl(client, timeSource, rand.New(jitterSource), 3600, nil, statsStore, 0.8, "") + sm := stats2.NewMockStatManager(statsStore) + cache := memcached.NewRateLimitCacheImpl(client, timeSource, rand.New(jitterSource), 3600, nil, sm, 0.8, "") timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) jitterSource.EXPECT().Int63().Return(int64(100)) @@ -485,7 +491,7 @@ func TestMemcacheWithJitter(t *testing.T) { ).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -505,7 +511,8 @@ func 
TestMemcacheAdd(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) client := mock_memcached.NewMockClient(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "") + sm := stats2.NewMockStatManager(statsStore) + cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "") // Test a race condition with the initial add timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) @@ -526,7 +533,7 @@ func TestMemcacheAdd(t *testing.T) { uint64(2), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -549,7 +556,7 @@ func TestMemcacheAdd(t *testing.T) { ).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key2", "value2"}}}, 1) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, diff --git a/test/mocks/config/config.go b/test/mocks/config/config.go index 38d5b347..5645fbb8 100644 --- a/test/mocks/config/config.go +++ b/test/mocks/config/config.go @@ -8,8 +8,8 @@ import ( context "context" envoy_extensions_common_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" config "github.com/envoyproxy/ratelimit/src/config" + stats2 "github.com/envoyproxy/ratelimit/src/stats" gomock "github.com/golang/mock/gomock" - stats "github.com/lyft/gostats" reflect "reflect" ) @@ -88,7 +88,7 @@ func (m *MockRateLimitConfigLoader) EXPECT() *MockRateLimitConfigLoaderMockRecor } // Load mocks base method -func (m *MockRateLimitConfigLoader) Load(arg0 []config.RateLimitConfigToLoad, arg1 stats.Scope) config.RateLimitConfig { +func (m *MockRateLimitConfigLoader) Load(arg0 []config.RateLimitConfigToLoad, arg1 stats2.Manager) config.RateLimitConfig { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Load", arg0, arg1) ret0, _ := ret[0].(config.RateLimitConfig) diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go index 43b4fd56..fdba67e9 100644 --- a/test/mocks/stats/manager.go +++ b/test/mocks/stats/manager.go @@ -1 +1,47 @@ package stats + +import ( + stat "github.com/envoyproxy/ratelimit/src/stats" + stats "github.com/lyft/gostats" +) + +type MockStatManager struct { + store stats.Store +} + +func (m *MockStatManager) NewShouldRateLimitStats() stat.ShouldRateLimitStats { + return stat.ShouldRateLimitStats{} +} + +func (m *MockStatManager) NewServiceStats() stat.ServiceStats { + return stat.ServiceStats{} +} + +func (m *MockStatManager) NewShouldRateLimitLegacyStats() stat.ShouldRateLimitLegacyStats { + return stat.ShouldRateLimitLegacyStats{} +} + +func (m *MockStatManager) NewStats(key string) stat.RateLimitStats { 
+ ret := m.NewStats(key) + return ret +} + +func (m *MockStatManager) AddTotalHits(u uint64, rlStats stat.RateLimitStats, key string) { + rlStats.TotalHits.Add(u) +} + +func (this *MockStatManager) AddOverLimit(u uint64, rlStats stat.RateLimitStats, key string) { + rlStats.OverLimit.Add(u) +} + +func (this *MockStatManager) AddNearLimit(u uint64, rlStats stat.RateLimitStats, key string) { + rlStats.NearLimit.Add(u) +} + +func (this *MockStatManager) AddOverLimitWithLocalCache(u uint64, rlStats stat.RateLimitStats, key string) { + rlStats.OverLimitWithLocalCache.Add(u) +} + +func NewMockStatManager(store stats.Store) stat.Manager { + return &MockStatManager{store: store} +} diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index 6c190ea7..32db1a4e 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -2,6 +2,7 @@ package redis_test import ( "context" + stats2 "github.com/envoyproxy/ratelimit/test/mocks/stats" "runtime" "testing" "time" @@ -41,12 +42,13 @@ func BenchmarkParallelDoLimit(b *testing.B) { mkDoLimitBench := func(pipelineWindow time.Duration, pipelineLimit int) func(*testing.B) { return func(b *testing.B) { statsStore := stats.NewStore(stats.NewNullSink(), false) + sm := stats2.NewMockStatManager(statsStore) client := redis.NewClientImpl(statsStore, false, "", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) defer client.Close() - cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "") + cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} // wait for the pool to fill up for { diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index 080d617d..48915be0 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -1,6 +1,7 @@ package redis_test import ( + stats2 "github.com/envoyproxy/ratelimit/test/mocks/stats" "testing" "github.com/coocood/freecache" @@ -36,17 +37,18 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { assert := assert.New(t) controller := gomock.NewController(t) defer controller.Finish() + statsStore := stats.NewStore(stats.NewNullSink(), false) + sm := stats2.NewMockStatManager(statsStore) client := mock_redis.NewMockClient(controller) perSecondClient := mock_redis.NewMockClient(controller) timeSource := mock_utils.NewMockTimeSource(controller) var cache limiter.RateLimitCache if usePerSecondRedis { - cache = redis.NewFixedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "") + cache = redis.NewFixedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm) } else { - cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "") + cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm) } - statsStore := stats.NewStore(stats.NewNullSink(), false) 
timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) var clientUsed *mock_redis.MockClient @@ -61,7 +63,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -85,7 +87,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { }, 1) limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2_subkey2_subvalue2", statsStore)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}}, @@ -111,8 +113,8 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { {{"key3", "value3"}, {"subkey3", "subvalue3"}}, }, 1) limits = []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key3_value3", statsStore), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, "key3_value3_subkey3_subvalue3", statsStore)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3")), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}, @@ -171,9 +173,10 @@ func TestOverLimitWithLocalCache(t *testing.T) { client := mock_redis.NewMockClient(controller) timeSource := mock_utils.NewMockTimeSource(controller) localCache := freecache.NewCache(100) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "") + statsStore := stats.NewStore(stats.NewNullSink(), false) + sm := stats2.NewMockStatManager(statsStore) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "", sm) sink := &common.TestStatSink{} - statsStore := stats.NewStore(sink, true) localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) // Test Near Limit Stats. 
Under Near Limit Ratio @@ -186,7 +189,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -263,8 +266,9 @@ func TestNearLimit(t *testing.T) { client := mock_redis.NewMockClient(controller) timeSource := mock_utils.NewMockTimeSource(controller) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "") statsStore := stats.NewStore(stats.NewNullSink(), false) + sm := stats2.NewMockStatManager(statsStore) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm) // Test Near Limit Stats. Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) @@ -276,7 +280,7 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -325,7 +329,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key5_value5", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -341,7 +345,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) - limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, "key6_value6", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -357,7 +361,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key7_value7", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -373,7 +377,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = 
common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key8_value8", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -389,7 +393,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key9_value9", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -405,7 +409,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key10_value10", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -423,8 +427,9 @@ func TestRedisWithJitter(t *testing.T) { client := mock_redis.NewMockClient(controller) timeSource := mock_utils.NewMockTimeSource(controller) jitterSource := mock_utils.NewMockJitterRandSource(controller) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, "") statsStore := stats.NewStore(stats.NewNullSink(), false) + sm := stats2.NewMockStatManager(statsStore) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm) timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) jitterSource.EXPECT().Int63().Return(int64(100)) @@ -433,7 +438,7 @@ func TestRedisWithJitter(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go index a51ddbe9..ff0af059 100644 --- a/test/service/ratelimit_legacy_test.go +++ b/test/service/ratelimit_legacy_test.go @@ -93,7 +93,7 @@ func TestServiceLegacy(test *testing.T) { } limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", 
t.statStore), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.sm.NewStats("key")), nil} legacyLimits, err := convertRatelimits(limits) if err != nil { @@ -130,7 +130,7 @@ func TestServiceLegacy(test *testing.T) { // Config should still be valid. Also make sure order does not affect results. limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.sm.NewStats("key"))} legacyLimits, err = convertRatelimits(limits) if err != nil { t.assert.FailNow(err.Error()) @@ -153,8 +153,8 @@ func TestServiceLegacy(test *testing.T) { response) t.assert.Nil(err) - t.assert.EqualValues(2, t.statStore.NewCounter("config_load_success").Value()) - t.assert.EqualValues(1, t.statStore.NewCounter("config_load_error").Value()) + t.assert.EqualValues(2, t.store.NewCounter("config_load_success").Value()) + t.assert.EqualValues(1, t.store.NewCounter("config_load_error").Value()) } func TestEmptyDomainLegacy(test *testing.T) { @@ -166,8 +166,8 @@ func TestEmptyDomainLegacy(test *testing.T) { response, err := service.GetLegacyService().ShouldRateLimit(nil, request) t.assert.Nil(response) t.assert.Equal("rate limit domain must not be empty", err.Error()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) + t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.service_error").Value()) + t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) } func TestEmptyDescriptorsLegacy(test *testing.T) { @@ -179,8 +179,8 @@ func TestEmptyDescriptorsLegacy(test *testing.T) { response, err := service.GetLegacyService().ShouldRateLimit(nil, request) t.assert.Nil(response) t.assert.Equal("rate limit descriptor list must not be empty", err.Error()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) + t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.service_error").Value()) + t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) } func TestCacheErrorLegacy(test *testing.T) { @@ -193,7 +193,7 @@ func TestCacheErrorLegacy(test *testing.T) { if err != nil { t.assert.FailNow(err.Error()) } - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.sm.NewStats("key"))} t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[0]).Return(limits[0]) t.cache.EXPECT().DoLimit(nil, req, limits).Do( func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { @@ -203,8 +203,8 @@ func TestCacheErrorLegacy(test *testing.T) { response, err := service.GetLegacyService().ShouldRateLimit(nil, legacyRequest) t.assert.Nil(response) t.assert.Equal("cache error", err.Error()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.redis_error").Value()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) + t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.redis_error").Value()) + 
t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) } func TestInitialLoadErrorLegacy(test *testing.T) { @@ -221,14 +221,14 @@ func TestInitialLoadErrorLegacy(test *testing.T) { func([]config.RateLimitConfigToLoad, stats.Scope) { panic(config.RateLimitConfigError("load error")) }) - service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore, true) + service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.sm, true) request := common.NewRateLimitRequestLegacy("test-domain", [][][2]string{{{"hello", "world"}}}, 1) response, err := service.GetLegacyService().ShouldRateLimit(nil, request) t.assert.Nil(response) t.assert.Equal("no rate limit configuration loaded", err.Error()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) + t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.service_error").Value()) + t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) } diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 12c77926..e13f8a3d 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -1,6 +1,7 @@ package ratelimit_test import ( + stats2 "github.com/envoyproxy/ratelimit/src/stats" "sync" "testing" @@ -17,6 +18,7 @@ import ( stats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" "golang.org/x/net/context" + stats3 "github.com/envoyproxy/ratelimit/test/mocks/stats" ) type barrier struct { @@ -55,7 +57,8 @@ type rateLimitServiceTestSuite struct { configLoader *mock_config.MockRateLimitConfigLoader config *mock_config.MockRateLimitConfig runtimeUpdateCallback chan<- int - statStore stats.Store + sm stats2.Manager + store stats.Scope } func commonSetup(t *testing.T) rateLimitServiceTestSuite { @@ -67,7 +70,8 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite { ret.cache = mock_limiter.NewMockRateLimitCache(ret.controller) ret.configLoader = mock_config.NewMockRateLimitConfigLoader(ret.controller) ret.config = mock_config.NewMockRateLimitConfig(ret.controller) - ret.statStore = stats.NewStore(stats.NewNullSink(), false) + ret.store = stats.NewStore(stats.NewNullSink(), false) + ret.sm = stats3.NewMockStatManager(ret.store.Store()) return ret } @@ -82,7 +86,7 @@ func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitSe this.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Return(this.config) - return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statStore, true) + return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.sm, true) } func TestService(test *testing.T) { @@ -117,7 +121,7 @@ func TestService(test *testing.T) { request = common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.sm.NewStats("key")), nil} t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -149,7 +153,7 @@ func TestService(test 
*testing.T) { // Config should still be valid. Also make sure order does not affect results. limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.sm.NewStats("key"))} t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) t.cache.EXPECT().DoLimit(nil, request, limits).Return( @@ -167,8 +171,8 @@ func TestService(test *testing.T) { response) t.assert.Nil(err) - t.assert.EqualValues(2, t.statStore.NewCounter("config_load_success").Value()) - t.assert.EqualValues(1, t.statStore.NewCounter("config_load_error").Value()) + t.assert.EqualValues(2, t.store.NewCounter("config_load_success").Value()) + t.assert.EqualValues(1, t.store.NewCounter("config_load_error").Value()) } func TestEmptyDomain(test *testing.T) { @@ -180,7 +184,7 @@ func TestEmptyDomain(test *testing.T) { response, err := service.ShouldRateLimit(nil, request) t.assert.Nil(response) t.assert.Equal("rate limit domain must not be empty", err.Error()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) + t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.service_error").Value()) } func TestEmptyDescriptors(test *testing.T) { @@ -192,7 +196,7 @@ func TestEmptyDescriptors(test *testing.T) { response, err := service.ShouldRateLimit(nil, request) t.assert.Nil(response) t.assert.Equal("rate limit descriptor list must not be empty", err.Error()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) + t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.service_error").Value()) } func TestCacheError(test *testing.T) { @@ -201,7 +205,7 @@ func TestCacheError(test *testing.T) { service := t.setupBasicService() request := common.NewRateLimitRequest("different-domain", [][][2]string{{{"foo", "bar"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.sm.NewStats("key"))} t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.cache.EXPECT().DoLimit(nil, request, limits).Do( func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { @@ -211,7 +215,7 @@ func TestCacheError(test *testing.T) { response, err := service.ShouldRateLimit(nil, request) t.assert.Nil(response) t.assert.Equal("cache error", err.Error()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.redis_error").Value()) + t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.redis_error").Value()) } func TestInitialLoadError(test *testing.T) { @@ -228,11 +232,11 @@ func TestInitialLoadError(test *testing.T) { func([]config.RateLimitConfigToLoad, stats.Scope) { panic(config.RateLimitConfigError("load error")) }) - service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore, true) + service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.sm, true) request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) response, err := service.ShouldRateLimit(nil, request) t.assert.Nil(response) t.assert.Equal("no rate limit configuration loaded", err.Error()) - t.assert.EqualValues(1, 
t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) + t.assert.EqualValues(1, t.store.NewCounter("call.should_rate_limit.service_error").Value()) } From 7b2494550a9adfe3f5277ecb27273bbd30c1481f Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Wed, 17 Feb 2021 15:45:59 -0300 Subject: [PATCH 06/26] fix stackoverflow --- test/mocks/stats/manager.go | 15 +++++++++++---- test/service/ratelimit_test.go | 2 +- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go index fdba67e9..c52a7f06 100644 --- a/test/mocks/stats/manager.go +++ b/test/mocks/stats/manager.go @@ -3,10 +3,11 @@ package stats import ( stat "github.com/envoyproxy/ratelimit/src/stats" stats "github.com/lyft/gostats" + logger "github.com/sirupsen/logrus" ) type MockStatManager struct { - store stats.Store + scope stats.Scope } func (m *MockStatManager) NewShouldRateLimitStats() stat.ShouldRateLimitStats { @@ -22,7 +23,13 @@ func (m *MockStatManager) NewShouldRateLimitLegacyStats() stat.ShouldRateLimitLe } func (m *MockStatManager) NewStats(key string) stat.RateLimitStats { - ret := m.NewStats(key) + ret := stat.RateLimitStats{} + logger.Debugf("outputing test stats %s", key) + ret.Key = key + ret.TotalHits = m.scope.NewCounter(key + ".detailed_total_hits") + ret.OverLimit = m.scope.NewCounter(key + ".detailed_over_limit") + ret.NearLimit = m.scope.NewCounter(key + ".detailed_near_limit") + ret.OverLimitWithLocalCache = m.scope.NewCounter(key + ".detailed_over_limit_with_local_cache") return ret } @@ -42,6 +49,6 @@ func (this *MockStatManager) AddOverLimitWithLocalCache(u uint64, rlStats stat.R rlStats.OverLimitWithLocalCache.Add(u) } -func NewMockStatManager(store stats.Store) stat.Manager { - return &MockStatManager{store: store} +func NewMockStatManager(store stats.Scope) stat.Manager { + return &MockStatManager{scope: store} } diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index e13f8a3d..7aaba1bd 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -71,7 +71,7 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite { ret.configLoader = mock_config.NewMockRateLimitConfigLoader(ret.controller) ret.config = mock_config.NewMockRateLimitConfig(ret.controller) ret.store = stats.NewStore(stats.NewNullSink(), false) - ret.sm = stats3.NewMockStatManager(ret.store.Store()) + ret.sm = stats3.NewMockStatManager(ret.store) return ret } From 84d912356db9b2c7bb81e00a89baf02d5a2738d0 Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Thu, 18 Feb 2021 16:58:46 -0300 Subject: [PATCH 07/26] fix unit tests --- src/limiter/base_limiter.go | 2 +- src/stats/manager_impl.go | 8 ++++---- test/mocks/stats/manager.go | 29 +++++++++++++++++++-------- test/service/ratelimit_legacy_test.go | 8 ++++---- test/service/ratelimit_test.go | 6 +++--- 5 files changed, 33 insertions(+), 20 deletions(-) diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index 6d48b79d..3a0cccd5 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -136,7 +136,7 @@ func (this *BaseRateLimiter) checkOverLimitThreshold(limitInfo *LimitInfo, hitsA // Otherwise, only the difference between the current limit value and the over limit threshold // were over limit hits. 
 	if limitInfo.limitBeforeIncrease >= limitInfo.overLimitThreshold {
-		this.Manager.AddOverLimitWithLocalCache(uint64(hitsAddend), limitInfo.limit.Stats, key)
+		this.Manager.AddOverLimit(uint64(hitsAddend), limitInfo.limit.Stats, key)
 	} else {
 		this.Manager.AddOverLimit(uint64(limitInfo.limitAfterIncrease - limitInfo.overLimitThreshold), limitInfo.limit.Stats, key)
diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go
index f440bd22..1158eca5 100644
--- a/src/stats/manager_impl.go
+++ b/src/stats/manager_impl.go
@@ -65,10 +65,10 @@ func (this *ManagerImpl) NewStats(key string) RateLimitStats {
 	ret := RateLimitStats{}
 	logger.Debugf("outputting test stats %s", key)
 	ret.Key = key
-	ret.TotalHits = this.scope.NewCounter(key + ".detailed_total_hits")
-	ret.OverLimit = this.scope.NewCounter(key + ".detailed_over_limit")
-	ret.NearLimit = this.scope.NewCounter(key + ".detailed_near_limit")
-	ret.OverLimitWithLocalCache = this.scope.NewCounter(key + ".detailed_over_limit_with_local_cache")
+	ret.TotalHits = this.scope.NewCounter(key + ".total_hits")
+	ret.OverLimit = this.scope.NewCounter(key + ".over_limit")
+	ret.NearLimit = this.scope.NewCounter(key + ".near_limit")
+	ret.OverLimitWithLocalCache = this.scope.NewCounter(key + ".over_limit_with_local_cache")
 	return ret
 }
diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go
index c52a7f06..4b161687 100644
--- a/test/mocks/stats/manager.go
+++ b/test/mocks/stats/manager.go
@@ -11,25 +11,38 @@ type MockStatManager struct {
 }
 
 func (m *MockStatManager) NewShouldRateLimitStats() stat.ShouldRateLimitStats {
-	return stat.ShouldRateLimitStats{}
+	s := m.scope.Scope("call.should_rate_limit")
+	ret := stat.ShouldRateLimitStats{}
+	ret.RedisError = s.NewCounter("redis_error")
+	ret.ServiceError = s.NewCounter("service_error")
+	return ret
 }
 
 func (m *MockStatManager) NewServiceStats() stat.ServiceStats {
-	return stat.ServiceStats{}
+	ret := stat.ServiceStats{}
+	ret.ConfigLoadSuccess = m.scope.NewCounter("config_load_success")
+	ret.ConfigLoadError = m.scope.NewCounter("config_load_error")
+	ret.ShouldRateLimit = m.NewShouldRateLimitStats()
+	return ret
 }
 
 func (m *MockStatManager) NewShouldRateLimitLegacyStats() stat.ShouldRateLimitLegacyStats {
-	return stat.ShouldRateLimitLegacyStats{}
+	s := m.scope.Scope("call.should_rate_limit_legacy")
+	return stat.ShouldRateLimitLegacyStats{
+		ReqConversionError:   s.NewCounter("req_conversion_error"),
+		RespConversionError:  s.NewCounter("resp_conversion_error"),
+		ShouldRateLimitError: s.NewCounter("should_rate_limit_error"),
+	}
 }
-
+// TODO: consider not using a mock, since pretty much all of the logic is repeated.
 func (m *MockStatManager) NewStats(key string) stat.RateLimitStats {
 	ret := stat.RateLimitStats{}
 	logger.Debugf("outputting test stats %s", key)
 	ret.Key = key
-	ret.TotalHits = m.scope.NewCounter(key + ".detailed_total_hits")
-	ret.OverLimit = m.scope.NewCounter(key + ".detailed_over_limit")
-	ret.NearLimit = m.scope.NewCounter(key + ".detailed_near_limit")
-	ret.OverLimitWithLocalCache = m.scope.NewCounter(key + ".detailed_over_limit_with_local_cache")
+	ret.TotalHits = m.scope.NewCounter(key + ".total_hits")
+	ret.OverLimit = m.scope.NewCounter(key + ".over_limit")
+	ret.NearLimit = m.scope.NewCounter(key + ".near_limit")
+	ret.OverLimitWithLocalCache = m.scope.NewCounter(key + ".over_limit_with_local_cache")
 	return ret
 }
diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go
index ff0af059..47271684 100644
--- a/test/service/ratelimit_legacy_test.go
+++ b/test/service/ratelimit_legacy_test.go
@@ -1,6 +1,7 @@
 package ratelimit_test
 
 import (
+	stats2 "github.com/envoyproxy/ratelimit/src/stats"
 	"testing"
 
 	core_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
@@ -14,7 +15,6 @@ import (
 	"github.com/envoyproxy/ratelimit/src/service"
 	"github.com/envoyproxy/ratelimit/test/common"
 	"github.com/golang/mock/gomock"
-	"github.com/lyft/gostats"
 	"github.com/stretchr/testify/assert"
 	"golang.org/x/net/context"
 )
@@ -80,7 +80,7 @@ func TestServiceLegacy(test *testing.T) {
 	barrier := newBarrier()
 	t.configLoader.EXPECT().Load(
 		[]config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do(
-		func([]config.RateLimitConfigToLoad, stats.Scope) { barrier.signal() }).Return(t.config)
+		func([]config.RateLimitConfigToLoad, stats2.Manager) { barrier.signal() }).Return(t.config)
 	t.runtimeUpdateCallback <- 1
 	barrier.wait()
@@ -120,7 +120,7 @@
 	// Config load failure.
t.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Scope) { + func([]config.RateLimitConfigToLoad, stats2.Manager) { barrier.signal() panic(config.RateLimitConfigError("load error")) }) @@ -229,7 +229,7 @@ func TestInitialLoadError(test *testing.T) { t.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1) t.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Scope) { + func([]config.RateLimitConfigToLoad, stats2.Manager) { panic(config.RateLimitConfigError("load error")) }) service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.sm, true) From 01cca48aa13e9569c3e45545f0955bf3a28436be Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Fri, 19 Feb 2021 10:23:52 -0300 Subject: [PATCH 08/26] Manager uses stats store --- src/server/server_impl.go | 9 +++++---- src/service_cmd/runner/runner.go | 18 +++++++----------- src/stats/manager.go | 3 +++ src/stats/manager_impl.go | 28 ++++++++++++++++------------ test/mocks/stats/manager.go | 26 +++++++++++++++----------- test/service/ratelimit_test.go | 2 +- 6 files changed, 47 insertions(+), 39 deletions(-) diff --git a/src/server/server_impl.go b/src/server/server_impl.go index b60d1e32..ef7b59e6 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -4,6 +4,7 @@ import ( "bytes" "expvar" "fmt" + stats2 "github.com/envoyproxy/ratelimit/src/stats" "io" "net/http" "net/http/pprof" @@ -168,11 +169,11 @@ func (server *server) Runtime() loader.IFace { return server.runtime } -func NewServer(s settings.Settings, name string, store stats.Store, localCache *freecache.Cache, opts ...settings.Option) Server { - return newServer(s, name, store, localCache, opts...) +func NewServer(s settings.Settings, name string, manager stats2.Manager, localCache *freecache.Cache, opts ...settings.Option) Server { + return newServer(s, name, manager, localCache, opts...) 
} -func newServer(s settings.Settings, name string, store stats.Store, localCache *freecache.Cache, opts ...settings.Option) *server { +func newServer(s settings.Settings, name string, manager stats2.Manager, localCache *freecache.Cache, opts ...settings.Option) *server { for _, opt := range opts { opt(&s) } @@ -186,7 +187,7 @@ func newServer(s settings.Settings, name string, store stats.Store, localCache * ret.debugPort = s.DebugPort // setup stats - ret.store = store + ret.store = manager.GetStatsStore() ret.scope = ret.store.ScopeWithTags(name, s.ExtraTags) ret.store.AddStatGenerator(stats.NewRuntimeStats(ret.scope.Scope("go"))) if localCache != nil { diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index c4c9ac17..0e749ad8 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -28,7 +28,7 @@ import ( ) type Runner struct { - statsStore stats.Store + manager stats2.Manager settings settings.Settings srv server.Server mu sync.Mutex @@ -36,17 +36,16 @@ type Runner struct { func NewRunner(s settings.Settings) Runner { return Runner{ - statsStore: stats.NewDefaultStore(), + manager: stats2.NewStatManager(stats.NewDefaultStore(), s.DetailedMetrics), settings: s, } } func (runner *Runner) GetStatsStore() stats.Store { - return runner.statsStore + return runner.manager.GetStatsStore() } -func createLimiter(srv server.Server, s settings.Settings, localCache *freecache.Cache) limiter.RateLimitCache { - manager := stats2.NewStatManager(srv.Scope(), s.DetailedMetrics) +func createLimiter(srv server.Server, s settings.Settings, localCache *freecache.Cache, manager stats2.Manager) limiter.RateLimitCache { switch s.BackendType { case "redis", "": return redis.NewRateLimiterCacheImplFromSettings( @@ -95,19 +94,16 @@ func (runner *Runner) Run() { localCache = freecache.NewCache(s.LocalCacheSizeInBytes) } - srv := server.NewServer(s, "ratelimit", runner.statsStore, localCache, settings.GrpcUnaryInterceptor(nil)) + srv := server.NewServer(s, "ratelimit", runner.manager, localCache, settings.GrpcUnaryInterceptor(nil)) runner.mu.Lock() runner.srv = srv runner.mu.Unlock() - - manager := stats2.NewStatManager(srv.Scope().Scope("service"), s.DetailedMetrics) - service := ratelimit.NewService( srv.Runtime(), - createLimiter(srv, s, localCache), + createLimiter(srv, s, localCache, runner.manager), config.NewRateLimitConfigLoaderImpl(), - manager, + runner.manager, s.RuntimeWatchRoot, ) diff --git a/src/stats/manager.go b/src/stats/manager.go index bdd22512..8dea48bc 100644 --- a/src/stats/manager.go +++ b/src/stats/manager.go @@ -1,5 +1,7 @@ package stats +import stats "github.com/lyft/gostats" + type Manager interface { AddTotalHits(u uint64, rlStats RateLimitStats, key string) AddOverLimit(u uint64, rlStats RateLimitStats, key string) @@ -9,4 +11,5 @@ type Manager interface { NewShouldRateLimitStats() ShouldRateLimitStats NewServiceStats() ServiceStats NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats + GetStatsStore() stats.Store } diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index 1158eca5..c3a91b0b 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -6,18 +6,22 @@ import ( logger "github.com/sirupsen/logrus" ) -func NewStatManager(scope gostats.Scope, detailedMetrics bool) *ManagerImpl { +func NewStatManager(store gostats.Store, detailedMetrics bool) *ManagerImpl { return &ManagerImpl{ - scope: scope, + store: store, detailed: detailedMetrics, } } type ManagerImpl struct { - scope 
gostats.Scope + store gostats.Store detailed bool } +func (this *ManagerImpl) GetStatsStore() gostats.Store { + return this.store +} + func (this *ManagerImpl) AddTotalHits(u uint64, rlStats RateLimitStats, key string) { rlStats.TotalHits.Add(u) if this.detailed { @@ -58,17 +62,17 @@ func (this *ManagerImpl) getDescriptorStat(key string) RateLimitStats { } // Create new rate descriptor stats for a descriptor tuple. -// @param statsScope supplies the owning scope. +// @param statsScope supplies the owning store. // @param key supplies the fully resolved descriptor tuple. // @return new stats. func (this *ManagerImpl) NewStats(key string) RateLimitStats { ret := RateLimitStats{} logger.Debugf("outputing test stats %s", key) ret.Key = key - ret.TotalHits = this.scope.NewCounter(key + ".total_hits") - ret.OverLimit = this.scope.NewCounter(key + ".over_limit") - ret.NearLimit = this.scope.NewCounter(key + ".near_limit") - ret.OverLimitWithLocalCache = this.scope.NewCounter(key + ".over_limit_with_local_cache") + ret.TotalHits = this.store.NewCounter(key + ".total_hits") + ret.OverLimit = this.store.NewCounter(key + ".over_limit") + ret.NearLimit = this.store.NewCounter(key + ".near_limit") + ret.OverLimitWithLocalCache = this.store.NewCounter(key + ".over_limit_with_local_cache") return ret } @@ -81,7 +85,7 @@ type ShouldRateLimitLegacyStats struct { func (this *ManagerImpl) NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats { - s := this.scope.Scope("call.should_rate_limit_legacy") + s := this.store.Scope("call.should_rate_limit_legacy") return ShouldRateLimitLegacyStats{ ReqConversionError: s.NewCounter("req_conversion_error"), RespConversionError: s.NewCounter("resp_conversion_error"), @@ -95,7 +99,7 @@ type ShouldRateLimitStats struct { } func (this *ManagerImpl) NewShouldRateLimitStats() ShouldRateLimitStats { - s := this.scope.Scope("call.should_rate_limit") + s := this.store.Scope("call.should_rate_limit") ret := ShouldRateLimitStats{} ret.RedisError = s.NewCounter("redis_error") ret.ServiceError = s.NewCounter("service_error") @@ -110,8 +114,8 @@ type ServiceStats struct { func (this *ManagerImpl) NewServiceStats() ServiceStats { ret := ServiceStats{} - ret.ConfigLoadSuccess = this.scope.NewCounter("config_load_success") - ret.ConfigLoadError = this.scope.NewCounter("config_load_error") + ret.ConfigLoadSuccess = this.store.NewCounter("config_load_success") + ret.ConfigLoadError = this.store.NewCounter("config_load_error") ret.ShouldRateLimit = this.NewShouldRateLimitStats() return ret } diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go index 4b161687..b0d49e8e 100644 --- a/test/mocks/stats/manager.go +++ b/test/mocks/stats/manager.go @@ -7,11 +7,15 @@ import ( ) type MockStatManager struct { - scope stats.Scope + store stats.Store +} + +func (m *MockStatManager) GetStatsStore() stats.Store { + return m.store } func (m *MockStatManager) NewShouldRateLimitStats() stat.ShouldRateLimitStats { - s := m.scope.Scope("call.should_rate_limit") + s := m.store.Scope("call.should_rate_limit") ret := stat.ShouldRateLimitStats{} ret.RedisError = s.NewCounter("redis_error") ret.ServiceError = s.NewCounter("service_error") @@ -20,14 +24,14 @@ func (m *MockStatManager) NewShouldRateLimitStats() stat.ShouldRateLimitStats { func (m *MockStatManager) NewServiceStats() stat.ServiceStats { ret := stat.ServiceStats{} - ret.ConfigLoadSuccess = m.scope.NewCounter("config_load_success") - ret.ConfigLoadError = m.scope.NewCounter("config_load_error") + ret.ConfigLoadSuccess = 
m.store.NewCounter("config_load_success") + ret.ConfigLoadError = m.store.NewCounter("config_load_error") ret.ShouldRateLimit = m.NewShouldRateLimitStats() return ret } func (m *MockStatManager) NewShouldRateLimitLegacyStats() stat.ShouldRateLimitLegacyStats { - s := m.scope.Scope("call.should_rate_limit_legacy") + s := m.store.Scope("call.should_rate_limit_legacy") return stat.ShouldRateLimitLegacyStats{ ReqConversionError: s.NewCounter("req_conversion_error"), RespConversionError: s.NewCounter("resp_conversion_error"), @@ -39,10 +43,10 @@ func (m *MockStatManager) NewStats(key string) stat.RateLimitStats { ret := stat.RateLimitStats{} logger.Debugf("outputing test stats %s", key) ret.Key = key - ret.TotalHits = m.scope.NewCounter(key + ".total_hits") - ret.OverLimit = m.scope.NewCounter(key + ".over_limit") - ret.NearLimit = m.scope.NewCounter(key + ".near_limit") - ret.OverLimitWithLocalCache = m.scope.NewCounter(key + ".over_limit_with_local_cache") + ret.TotalHits = m.store.NewCounter(key + ".total_hits") + ret.OverLimit = m.store.NewCounter(key + ".over_limit") + ret.NearLimit = m.store.NewCounter(key + ".near_limit") + ret.OverLimitWithLocalCache = m.store.NewCounter(key + ".over_limit_with_local_cache") return ret } @@ -62,6 +66,6 @@ func (this *MockStatManager) AddOverLimitWithLocalCache(u uint64, rlStats stat.R rlStats.OverLimitWithLocalCache.Add(u) } -func NewMockStatManager(store stats.Scope) stat.Manager { - return &MockStatManager{scope: store} +func NewMockStatManager(store stats.Store) stat.Manager { + return &MockStatManager{store: store} } diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 8cacbee1..587a7667 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -58,7 +58,7 @@ type rateLimitServiceTestSuite struct { config *mock_config.MockRateLimitConfig runtimeUpdateCallback chan<- int sm stats2.Manager - store stats.Scope + store stats.Store } func commonSetup(t *testing.T) rateLimitServiceTestSuite { From 419f3b51cc2fa7eb420d32a06a61ffdb2b24bee8 Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Mon, 22 Feb 2021 14:17:02 -0300 Subject: [PATCH 09/26] Stat manager manages multiple scopes --- src/stats/manager_impl.go | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index c3a91b0b..d4879538 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -7,14 +7,21 @@ import ( ) func NewStatManager(store gostats.Store, detailedMetrics bool) *ManagerImpl { + serviceScope := store.Scope("service") return &ManagerImpl{ - store: store, - detailed: detailedMetrics, + store: store, + rlStatsScope: serviceScope.Scope("rate_limit"), + legacyStatsScope: serviceScope.Scope("call.should_rate_limit_legacy"), + serviceStatsScope: serviceScope, + detailed: detailedMetrics, } } type ManagerImpl struct { store gostats.Store + rlStatsScope gostats.Scope + legacyStatsScope gostats.Scope + serviceStatsScope gostats.Scope detailed bool } @@ -69,10 +76,10 @@ func (this *ManagerImpl) NewStats(key string) RateLimitStats { ret := RateLimitStats{} logger.Debugf("outputing test stats %s", key) ret.Key = key - ret.TotalHits = this.store.NewCounter(key + ".total_hits") - ret.OverLimit = this.store.NewCounter(key + ".over_limit") - ret.NearLimit = this.store.NewCounter(key + ".near_limit") - ret.OverLimitWithLocalCache = this.store.NewCounter(key + ".over_limit_with_local_cache") + ret.TotalHits = 
this.rlStatsScope.NewCounter(key + ".total_hits") + ret.OverLimit = this.rlStatsScope.NewCounter(key + ".over_limit") + ret.NearLimit = this.rlStatsScope.NewCounter(key + ".near_limit") + ret.OverLimitWithLocalCache = this.rlStatsScope.NewCounter(key + ".over_limit_with_local_cache") return ret } @@ -85,11 +92,10 @@ type ShouldRateLimitLegacyStats struct { func (this *ManagerImpl) NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats { - s := this.store.Scope("call.should_rate_limit_legacy") return ShouldRateLimitLegacyStats{ - ReqConversionError: s.NewCounter("req_conversion_error"), - RespConversionError: s.NewCounter("resp_conversion_error"), - ShouldRateLimitError: s.NewCounter("should_rate_limit_error"), + ReqConversionError: this.legacyStatsScope.NewCounter("req_conversion_error"), + RespConversionError: this.legacyStatsScope.NewCounter("resp_conversion_error"), + ShouldRateLimitError: this.legacyStatsScope.NewCounter("should_rate_limit_error"), } } @@ -99,7 +105,7 @@ type ShouldRateLimitStats struct { } func (this *ManagerImpl) NewShouldRateLimitStats() ShouldRateLimitStats { - s := this.store.Scope("call.should_rate_limit") + s := this.serviceStatsScope.Scope("call.should_rate_limit") ret := ShouldRateLimitStats{} ret.RedisError = s.NewCounter("redis_error") ret.ServiceError = s.NewCounter("service_error") @@ -114,8 +120,8 @@ type ServiceStats struct { func (this *ManagerImpl) NewServiceStats() ServiceStats { ret := ServiceStats{} - ret.ConfigLoadSuccess = this.store.NewCounter("config_load_success") - ret.ConfigLoadError = this.store.NewCounter("config_load_error") + ret.ConfigLoadSuccess = this.serviceStatsScope.NewCounter("config_load_success") + ret.ConfigLoadError = this.serviceStatsScope.NewCounter("config_load_error") ret.ShouldRateLimit = this.NewShouldRateLimitStats() return ret } From 45f9174ff632cc332ab567c954411ebb0ca2139b Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Mon, 22 Feb 2021 14:41:49 -0300 Subject: [PATCH 10/26] integration test fix --- src/config_check_cmd/main.go | 2 +- src/service_cmd/runner/runner.go | 2 +- src/stats/manager_impl.go | 7 ++++--- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/config_check_cmd/main.go b/src/config_check_cmd/main.go index e29fcea6..463f8d2e 100644 --- a/src/config_check_cmd/main.go +++ b/src/config_check_cmd/main.go @@ -23,7 +23,7 @@ func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { }() settingStruct := settings.NewSettings() dummyStats := stats.NewStore(stats.NewNullSink(), false) - manager := stat.NewStatManager(dummyStats, settingStruct.DetailedMetrics) + manager := stat.NewStatManager(dummyStats, settingStruct) config.NewRateLimitConfigImpl(allConfigs, manager) } diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 0e749ad8..5a2203ff 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -36,7 +36,7 @@ type Runner struct { func NewRunner(s settings.Settings) Runner { return Runner{ - manager: stats2.NewStatManager(stats.NewDefaultStore(), s.DetailedMetrics), + manager: stats2.NewStatManager(stats.NewDefaultStore(), s), settings: s, } } diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index d4879538..a01c8f76 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -2,18 +2,19 @@ package stats import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/settings" gostats "github.com/lyft/gostats" logger 
"github.com/sirupsen/logrus" ) -func NewStatManager(store gostats.Store, detailedMetrics bool) *ManagerImpl { - serviceScope := store.Scope("service") +func NewStatManager(store gostats.Store, s settings.Settings) *ManagerImpl { + serviceScope := store.ScopeWithTags("ratelimit", s.ExtraTags).Scope("service") return &ManagerImpl{ store: store, rlStatsScope: serviceScope.Scope("rate_limit"), legacyStatsScope: serviceScope.Scope("call.should_rate_limit_legacy"), serviceStatsScope: serviceScope, - detailed: detailedMetrics, + detailed: s.DetailedMetrics, } } From aa1e0984bec01ff82dcd33e23c9d10bdf6472ae4 Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Mon, 22 Feb 2021 14:42:17 -0300 Subject: [PATCH 11/26] go fmt --- src/config/config_impl.go | 6 +++--- src/limiter/base_limiter.go | 14 +++++++------- src/service/ratelimit.go | 1 + src/service_cmd/runner/runner.go | 12 ++++++------ src/settings/settings.go | 2 +- src/stats/manager_impl.go | 18 ++++++++---------- test/config/config_test.go | 2 +- test/mocks/stats/manager.go | 1 + test/service/ratelimit_test.go | 2 +- 9 files changed, 29 insertions(+), 29 deletions(-) diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 433d95a0..5de4cf3a 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -6,10 +6,10 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + stat "github.com/envoyproxy/ratelimit/src/stats" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" "gopkg.in/yaml.v2" - stat "github.com/envoyproxy/ratelimit/src/stats" ) type yamlRateLimit struct { @@ -39,8 +39,8 @@ type rateLimitDomain struct { } type rateLimitConfigImpl struct { - domains map[string]*rateLimitDomain - manager stat.Manager + domains map[string]*rateLimitDomain + manager stat.Manager } var validKeys = map[string]bool{ diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index 3a0cccd5..0ec9d08b 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -49,7 +49,7 @@ func (this *BaseRateLimiter) GenerateCacheKeys(request *pb.RateLimitRequest, cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey(request.Domain, request.Descriptors[i], limits[i], now) // Increase statistics for limits hit by their respective requests. 
if limits[i] != nil { - this.Manager.AddTotalHits(uint64(hitsAddend),limits[i].Stats, cacheKeys[i].Key) + this.Manager.AddTotalHits(uint64(hitsAddend), limits[i].Stats, cacheKeys[i].Key) } } return cacheKeys @@ -76,8 +76,8 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * nil, 0) } if isOverLimitWithLocalCache { - this.Manager.AddOverLimit(uint64(hitsAddend),limitInfo.limit.Stats,key) - this.Manager.AddOverLimitWithLocalCache(uint64(hitsAddend),limitInfo.limit.Stats,key) + this.Manager.AddOverLimit(uint64(hitsAddend), limitInfo.limit.Stats, key) + this.Manager.AddOverLimitWithLocalCache(uint64(hitsAddend), limitInfo.limit.Stats, key) return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, limitInfo.limit.Limit, 0) } @@ -125,7 +125,7 @@ func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expira cacheKeyGenerator: NewCacheKeyGenerator(cacheKeyPrefix), localCache: localCache, nearLimitRatio: nearLimitRatio, - Manager: manager, + Manager: manager, } } @@ -138,12 +138,12 @@ func (this *BaseRateLimiter) checkOverLimitThreshold(limitInfo *LimitInfo, hitsA if limitInfo.limitBeforeIncrease >= limitInfo.overLimitThreshold { this.Manager.AddOverLimit(uint64(hitsAddend), limitInfo.limit.Stats, key) } else { - this.Manager.AddOverLimit(uint64(limitInfo.limitAfterIncrease - limitInfo.overLimitThreshold), limitInfo.limit.Stats, key) + this.Manager.AddOverLimit(uint64(limitInfo.limitAfterIncrease-limitInfo.overLimitThreshold), limitInfo.limit.Stats, key) // If the limit before increase was below the over limit value, then some of the hits were // in the near limit range. this.Manager.AddNearLimit( - uint64(limitInfo.overLimitThreshold - utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)), + uint64(limitInfo.overLimitThreshold-utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)), limitInfo.limit.Stats, key, ) @@ -159,7 +159,7 @@ func (this *BaseRateLimiter) checkNearLimitThreshold(limitInfo *LimitInfo, hitsA if limitInfo.limitBeforeIncrease >= limitInfo.nearLimitThreshold { this.Manager.AddNearLimit(uint64(hitsAddend), limitInfo.limit.Stats, key) } else { - this.Manager.AddNearLimit(uint64(limitInfo.limitAfterIncrease - limitInfo.nearLimitThreshold), limitInfo.limit.Stats, key) + this.Manager.AddNearLimit(uint64(limitInfo.limitAfterIncrease-limitInfo.nearLimitThreshold), limitInfo.limit.Stats, key) } } } diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index f0eecbf6..1acd5953 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -170,6 +170,7 @@ func (this *service) GetCurrentConfig() config.RateLimitConfig { defer this.configLock.RUnlock() return this.config } + //todo: add methods to interface func NewService(runtime loader.IFace, cache limiter.RateLimitCache, configLoader config.RateLimitConfigLoader, manager stats2.Manager, runtimeWatchRoot bool) RateLimitServiceServer { diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 5a2203ff..c66e384d 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -28,16 +28,16 @@ import ( ) type Runner struct { - manager stats2.Manager - settings settings.Settings - srv server.Server - mu sync.Mutex + manager stats2.Manager + settings settings.Settings + srv server.Server + mu sync.Mutex } func NewRunner(s settings.Settings) Runner { return Runner{ - manager: stats2.NewStatManager(stats.NewDefaultStore(), s), - settings: s, + manager: 
stats2.NewStatManager(stats.NewDefaultStore(), s), + settings: s, } } diff --git a/src/settings/settings.go b/src/settings/settings.go index b91eb0e1..40c755bd 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -61,7 +61,7 @@ type Settings struct { MemcacheHostPort string `envconfig:"MEMCACHE_HOST_PORT" default:""` //Detailed Metrics Mode - DetailedMetrics bool `envconfig:"DETAILED_METRICS_MODE" default:"false"` + DetailedMetrics bool `envconfig:"DETAILED_METRICS_MODE" default:"false"` } type Option func(*Settings) diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index a01c8f76..c22cfc7c 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -19,11 +19,11 @@ func NewStatManager(store gostats.Store, s settings.Settings) *ManagerImpl { } type ManagerImpl struct { - store gostats.Store - rlStatsScope gostats.Scope - legacyStatsScope gostats.Scope - serviceStatsScope gostats.Scope - detailed bool + store gostats.Store + rlStatsScope gostats.Scope + legacyStatsScope gostats.Scope + serviceStatsScope gostats.Scope + detailed bool } func (this *ManagerImpl) GetStatsStore() gostats.Store { @@ -84,14 +84,12 @@ func (this *ManagerImpl) NewStats(key string) RateLimitStats { return ret } - type ShouldRateLimitLegacyStats struct { ReqConversionError gostats.Counter RespConversionError gostats.Counter ShouldRateLimitError gostats.Counter } - func (this *ManagerImpl) NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats { return ShouldRateLimitLegacyStats{ ReqConversionError: this.legacyStatsScope.NewCounter("req_conversion_error"), @@ -119,7 +117,7 @@ type ServiceStats struct { ShouldRateLimit ShouldRateLimitStats } -func (this *ManagerImpl) NewServiceStats() ServiceStats { +func (this *ManagerImpl) NewServiceStats() ServiceStats { ret := ServiceStats{} ret.ConfigLoadSuccess = this.serviceStatsScope.NewCounter("config_load_success") ret.ConfigLoadError = this.serviceStatsScope.NewCounter("config_load_error") @@ -141,7 +139,6 @@ func DescriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) str return domain + "." + rateLimitKey } - // Stats for an individual rate limit config entry. //todo: unexport fields type RateLimitStats struct { @@ -151,6 +148,7 @@ type RateLimitStats struct { NearLimit gostats.Counter OverLimitWithLocalCache gostats.Counter } + func (this RateLimitStats) String() string { return this.Key -} \ No newline at end of file +} diff --git a/test/config/config_test.go b/test/config/config_test.go index 429bc590..5a683dd5 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -344,4 +344,4 @@ func TestNonMapList(t *testing.T) { mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) }, "non_map_list.yaml: config error, yaml file contains list of type other than map: a") -} \ No newline at end of file +} diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go index b0d49e8e..8e1d7b69 100644 --- a/test/mocks/stats/manager.go +++ b/test/mocks/stats/manager.go @@ -38,6 +38,7 @@ func (m *MockStatManager) NewShouldRateLimitLegacyStats() stat.ShouldRateLimitLe ShouldRateLimitError: s.NewCounter("should_rate_limit_error"), } } + //todo: consider not using mock since pretty much all logic is repeated. 
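// One way to act on the todo above (hypothetical helper, not part of this
// series): hand tests a real manager built over a null-sink store, e.g.
//
//	func NewTestStatManager(store stats.Store) stat.Manager {
//		return stat.NewStatManager(store, settings.Settings{})
//	}
//
// which would keep the NewStats/New*Stats wiring in one place instead of
// repeating it here.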
func (m *MockStatManager) NewStats(key string) stat.RateLimitStats { ret := stat.RateLimitStats{} diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 587a7667..7d5fbad9 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -14,11 +14,11 @@ import ( mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" mock_loader "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader" mock_snapshot "github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot" + stats3 "github.com/envoyproxy/ratelimit/test/mocks/stats" "github.com/golang/mock/gomock" stats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" "golang.org/x/net/context" - stats3 "github.com/envoyproxy/ratelimit/test/mocks/stats" ) type barrier struct { From d102107326793a40c6cf5bcb3f52456fdf268449 Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Mon, 22 Feb 2021 15:13:06 -0300 Subject: [PATCH 12/26] package naming --- src/config/config.go | 6 +++--- src/config/config_impl.go | 14 +++++++------- src/config_check_cmd/main.go | 7 +++---- src/memcached/cache_impl.go | 8 ++++---- src/redis/cache_impl.go | 4 ++-- src/redis/fixed_cache_impl.go | 4 ++-- src/server/server_impl.go | 24 ++++++++++++------------ src/service/ratelimit.go | 8 ++++---- src/service/ratelimit_legacy.go | 4 ++-- 9 files changed, 39 insertions(+), 40 deletions(-) diff --git a/src/config/config.go b/src/config/config.go index d7f05132..49b5e280 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -3,7 +3,7 @@ package config import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - stat "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/stats" "golang.org/x/net/context" ) @@ -17,7 +17,7 @@ func (e RateLimitConfigError) Error() string { // Wrapper for an individual rate limit config entry which includes the defined limit and stats. type RateLimit struct { FullKey string - Stats stat.RateLimitStats + Stats stats.RateLimitStats Limit *pb.RateLimitResponse_RateLimit } @@ -47,5 +47,5 @@ type RateLimitConfigLoader interface { // @param statsScope supplies the stats scope to use for limit stats during runtime. // @return a new configuration. // @throws RateLimitConfigError if the configuration could not be created. - Load(configs []RateLimitConfigToLoad, manager stat.Manager) RateLimitConfig + Load(configs []RateLimitConfigToLoad, manager stats.Manager) RateLimitConfig } diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 5de4cf3a..491cdfc8 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -6,7 +6,7 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - stat "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/stats" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" "gopkg.in/yaml.v2" @@ -40,7 +40,7 @@ type rateLimitDomain struct { type rateLimitConfigImpl struct { domains map[string]*rateLimitDomain - manager stat.Manager + manager stats.Manager } var validKeys = map[string]bool{ @@ -60,7 +60,7 @@ var validKeys = map[string]bool{ // @param scope supplies the owning scope. // @return the new config entry. 
func NewRateLimit( - requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stat.RateLimitStats) *RateLimit { + requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats) *RateLimit { return &RateLimit{FullKey: rlStats.String(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}} } @@ -91,7 +91,7 @@ func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimit // @param parentKey supplies the fully resolved key name that owns this config level. // @param descriptors supplies the YAML descriptors to load. // @param statsScope supplies the owning scope. -func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor, manager stat.Manager) { +func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor, manager stats.Manager) { for _, descriptorConfig := range descriptors { if descriptorConfig.Key == "" { @@ -236,7 +236,7 @@ func (this *rateLimitConfigImpl) GetLimit( } if descriptor.GetLimit() != nil { - rateLimitKey := stat.DescriptorKey(domain, descriptor) + rateLimitKey := stats.DescriptorKey(domain, descriptor) rateLimitOverrideUnit := pb.RateLimitResponse_RateLimit_Unit(descriptor.GetLimit().GetUnit()) rateLimit = NewRateLimit( descriptor.GetLimit().GetRequestsPerUnit(), @@ -283,7 +283,7 @@ func (this *rateLimitConfigImpl) GetLimit( // @param stats supplies the stats scope to use for limit stats during runtime. // @return a new config. func NewRateLimitConfigImpl( - configs []RateLimitConfigToLoad, manager stat.Manager) RateLimitConfig { + configs []RateLimitConfigToLoad, manager stats.Manager) RateLimitConfig { ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, manager} for _, config := range configs { @@ -296,7 +296,7 @@ func NewRateLimitConfigImpl( type rateLimitConfigLoaderImpl struct{} func (this *rateLimitConfigLoaderImpl) Load( - configs []RateLimitConfigToLoad, manager stat.Manager) RateLimitConfig { + configs []RateLimitConfigToLoad, manager stats.Manager) RateLimitConfig { return NewRateLimitConfigImpl(configs, manager) } diff --git a/src/config_check_cmd/main.go b/src/config_check_cmd/main.go index 463f8d2e..d5751cc7 100644 --- a/src/config_check_cmd/main.go +++ b/src/config_check_cmd/main.go @@ -4,13 +4,13 @@ import ( "flag" "fmt" "github.com/envoyproxy/ratelimit/src/settings" - stat "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/stats" "io/ioutil" "os" "path/filepath" "github.com/envoyproxy/ratelimit/src/config" - "github.com/lyft/gostats" + gostats "github.com/lyft/gostats" ) func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { @@ -22,8 +22,7 @@ func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { } }() settingStruct := settings.NewSettings() - dummyStats := stats.NewStore(stats.NewNullSink(), false) - manager := stat.NewStatManager(dummyStats, settingStruct) + manager := stats.NewStatManager(gostats.NewStore(gostats.NewNullSink(), false), settingStruct) config.NewRateLimitConfigImpl(allConfigs, manager) } diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 90b397c8..78be2171 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -17,13 +17,13 @@ package memcached import ( "context" - stat "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/stats" "math/rand" "strconv" "sync" 
"github.com/coocood/freecache" - stats "github.com/lyft/gostats" + gostats "github.com/lyft/gostats" "github.com/bradfitz/gomemcache/memcache" @@ -175,7 +175,7 @@ func (this *rateLimitMemcacheImpl) Flush() { } func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand, - expirationJitterMaxSeconds int64, localCache *freecache.Cache, manager stat.Manager, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { + expirationJitterMaxSeconds int64, localCache *freecache.Cache, manager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { return &rateLimitMemcacheImpl{ client: client, timeSource: timeSource, @@ -188,7 +188,7 @@ func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRan } func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand, - localCache *freecache.Cache, scope stats.Scope, manager stat.Manager) limiter.RateLimitCache { + localCache *freecache.Cache, scope gostats.Scope, manager stats.Manager) limiter.RateLimitCache { return NewRateLimitCacheImpl( CollectStats(memcache.New(s.MemcacheHostPort), scope.Scope("memcache")), timeSource, diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 36efd85b..a6ddfe85 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -1,7 +1,7 @@ package redis import ( - stat "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/stats" "math/rand" "github.com/coocood/freecache" @@ -11,7 +11,7 @@ import ( "github.com/envoyproxy/ratelimit/src/utils" ) -func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, manager stat.Manager) limiter.RateLimitCache { +func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, manager stats.Manager) limiter.RateLimitCache { var perSecondPool Client if s.RedisPerSecond { perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index 387ad727..2a0d5f7f 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -1,7 +1,7 @@ package redis import ( - stat "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/stats" "math/rand" "github.com/coocood/freecache" @@ -108,7 +108,7 @@ func (this *fixedRateLimitCacheImpl) DoLimit( func (this *fixedRateLimitCacheImpl) Flush() {} func NewFixedRateLimitCacheImpl(client Client, perSecondClient Client, timeSource utils.TimeSource, - jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, manager stat.Manager) limiter.RateLimitCache { + jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, manager stats.Manager) limiter.RateLimitCache { return &fixedRateLimitCacheImpl{ client: client, perSecondClient: perSecondClient, diff --git a/src/server/server_impl.go b/src/server/server_impl.go index ef7b59e6..b9499b55 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -4,7 +4,7 @@ import ( "bytes" "expvar" "fmt" - stats2 
"github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/stats" "io" "net/http" "net/http/pprof" @@ -26,7 +26,7 @@ import ( "github.com/gorilla/mux" reuseport "github.com/kavu/go_reuseport" "github.com/lyft/goruntime/loader" - stats "github.com/lyft/gostats" + gostats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/health" @@ -45,8 +45,8 @@ type server struct { debugPort int router *mux.Router grpcServer *grpc.Server - store stats.Store - scope stats.Scope + store gostats.Store + scope gostats.Scope runtime loader.IFace debugListener serverDebugListener httpServer *http.Server @@ -161,7 +161,7 @@ func (server *server) startGrpc() { server.grpcServer.Serve(lis) } -func (server *server) Scope() stats.Scope { +func (server *server) Scope() gostats.Scope { return server.scope } @@ -169,11 +169,11 @@ func (server *server) Runtime() loader.IFace { return server.runtime } -func NewServer(s settings.Settings, name string, manager stats2.Manager, localCache *freecache.Cache, opts ...settings.Option) Server { +func NewServer(s settings.Settings, name string, manager stats.Manager, localCache *freecache.Cache, opts ...settings.Option) Server { return newServer(s, name, manager, localCache, opts...) } -func newServer(s settings.Settings, name string, manager stats2.Manager, localCache *freecache.Cache, opts ...settings.Option) *server { +func newServer(s settings.Settings, name string, manager stats.Manager, localCache *freecache.Cache, opts ...settings.Option) *server { for _, opt := range opts { opt(&s) } @@ -186,10 +186,10 @@ func newServer(s settings.Settings, name string, manager stats2.Manager, localCa ret.grpcPort = s.GrpcPort ret.debugPort = s.DebugPort - // setup stats + // setup gostats ret.store = manager.GetStatsStore() ret.scope = ret.store.ScopeWithTags(name, s.ExtraTags) - ret.store.AddStatGenerator(stats.NewRuntimeStats(ret.scope.Scope("go"))) + ret.store.AddStatGenerator(gostats.NewRuntimeStats(ret.scope.Scope("go"))) if localCache != nil { ret.store.AddStatGenerator(limiter.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) } @@ -245,10 +245,10 @@ func newServer(s settings.Settings, name string, manager stats2.Manager, localCa pprof.Profile(writer, request) }) - // setup stats endpoint + // setup gostats endpoint ret.AddDebugHttpEndpoint( - "/stats", - "print out stats", + "/gostats", + "print out gostats", func(writer http.ResponseWriter, request *http.Request) { expvar.Do(func(kv expvar.KeyValue) { io.WriteString(writer, fmt.Sprintf("%s: %s\n", kv.Key, kv.Value)) diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 1acd5953..d520fcaf 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -2,7 +2,7 @@ package ratelimit import ( "fmt" - stats2 "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/stats" "strings" "sync" @@ -29,12 +29,12 @@ type service struct { config config.RateLimitConfig runtimeUpdateEvent chan int cache limiter.RateLimitCache - stats stats2.ServiceStats + stats stats.ServiceStats legacy *legacyService runtimeWatchRoot bool } -func (this *service) reloadConfig(manager stats2.Manager) { +func (this *service) reloadConfig(manager stats.Manager) { defer func() { if e := recover(); e != nil { configError, ok := e.(config.RateLimitConfigError) @@ -173,7 +173,7 @@ func (this *service) GetCurrentConfig() config.RateLimitConfig { //todo: add methods to interface func NewService(runtime loader.IFace, cache 
limiter.RateLimitCache, - configLoader config.RateLimitConfigLoader, manager stats2.Manager, runtimeWatchRoot bool) RateLimitServiceServer { + configLoader config.RateLimitConfigLoader, manager stats.Manager, runtimeWatchRoot bool) RateLimitServiceServer { newService := &service{ runtime: runtime, diff --git a/src/service/ratelimit_legacy.go b/src/service/ratelimit_legacy.go index 8a420fd6..ac3971e0 100644 --- a/src/service/ratelimit_legacy.go +++ b/src/service/ratelimit_legacy.go @@ -5,7 +5,7 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - stats2 "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/stats" "golang.org/x/net/context" ) @@ -17,7 +17,7 @@ type RateLimitLegacyServiceServer interface { // the legacyService receives RateLimitRequests, converts the request, and calls the service's ShouldRateLimit method. type legacyService struct { s *service - shouldRateLimitLegacyStats stats2.ShouldRateLimitLegacyStats + shouldRateLimitLegacyStats stats.ShouldRateLimitLegacyStats } func (this *legacyService) ShouldRateLimit( From a66532de3a9ac06baabb8c0797b1d04533105fb4 Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Mon, 22 Feb 2021 16:12:29 -0300 Subject: [PATCH 13/26] test package naming --- test/redis/bench_test.go | 8 ++++---- test/redis/fixed_cache_impl_test.go | 28 +++++++++++++-------------- test/service/ratelimit_legacy_test.go | 8 ++++---- test/service/ratelimit_test.go | 16 +++++++-------- 4 files changed, 30 insertions(+), 30 deletions(-) diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index 32db1a4e..37bca184 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -2,7 +2,7 @@ package redis_test import ( "context" - stats2 "github.com/envoyproxy/ratelimit/test/mocks/stats" + "github.com/envoyproxy/ratelimit/test/mocks/stats" "runtime" "testing" "time" @@ -11,7 +11,7 @@ import ( "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/redis" "github.com/envoyproxy/ratelimit/src/utils" - stats "github.com/lyft/gostats" + gostats "github.com/lyft/gostats" "math/rand" @@ -41,8 +41,8 @@ func BenchmarkParallelDoLimit(b *testing.B) { mkDoLimitBench := func(pipelineWindow time.Duration, pipelineLimit int) func(*testing.B) { return func(b *testing.B) { - statsStore := stats.NewStore(stats.NewNullSink(), false) - sm := stats2.NewMockStatManager(statsStore) + statsStore := gostats.NewStore(gostats.NewNullSink(), false) + sm := stats.NewMockStatManager(statsStore) client := redis.NewClientImpl(statsStore, false, "", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) defer client.Close() diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index 48915be0..a7d5de46 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -1,7 +1,7 @@ package redis_test import ( - stats2 "github.com/envoyproxy/ratelimit/test/mocks/stats" + "github.com/envoyproxy/ratelimit/test/mocks/stats" "testing" "github.com/coocood/freecache" @@ -12,7 +12,7 @@ import ( "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" "github.com/envoyproxy/ratelimit/src/utils" - stats "github.com/lyft/gostats" + gostats "github.com/lyft/gostats" "math/rand" @@ -37,8 +37,8 @@ func testRedis(usePerSecondRedis bool) 
func(*testing.T) { assert := assert.New(t) controller := gomock.NewController(t) defer controller.Finish() - statsStore := stats.NewStore(stats.NewNullSink(), false) - sm := stats2.NewMockStatManager(statsStore) + statsStore := gostats.NewStore(gostats.NewNullSink(), false) + sm := stats.NewMockStatManager(statsStore) client := mock_redis.NewMockClient(controller) perSecondClient := mock_redis.NewMockClient(controller) @@ -129,14 +129,14 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { } } -func testLocalCacheStats(localCacheStats stats.StatGenerator, statsStore stats.Store, sink *common.TestStatSink, +func testLocalCacheStats(localCacheStats gostats.StatGenerator, statsStore gostats.Store, sink *common.TestStatSink, expectedHitCount int, expectedMissCount int, expectedLookUpCount int, expectedExpiredCount int, expectedEntryCount int) func(*testing.T) { return func(t *testing.T) { localCacheStats.GenerateStats() statsStore.Flush() - // Check whether all local_cache related stats are available. + // Check whether all local_cache related gostats are available. _, ok := sink.Record["averageAccessTime"] assert.Equal(t, true, ok) hitCount, ok := sink.Record["hitCount"] @@ -173,8 +173,8 @@ func TestOverLimitWithLocalCache(t *testing.T) { client := mock_redis.NewMockClient(controller) timeSource := mock_utils.NewMockTimeSource(controller) localCache := freecache.NewCache(100) - statsStore := stats.NewStore(stats.NewNullSink(), false) - sm := stats2.NewMockStatManager(statsStore) + statsStore := gostats.NewStore(gostats.NewNullSink(), false) + sm := stats.NewMockStatManager(statsStore) cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "", sm) sink := &common.TestStatSink{} localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) @@ -222,7 +222,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { // Check the local cache stats. testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) - // Test Over limit stats + // Test Over limit gostats timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), @@ -241,7 +241,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { // Check the local cache stats. testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) - // Test Over limit stats with local cache + // Test Over limit gostats with local cache timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).Times(0) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), @@ -266,8 +266,8 @@ func TestNearLimit(t *testing.T) { client := mock_redis.NewMockClient(controller) timeSource := mock_utils.NewMockTimeSource(controller) - statsStore := stats.NewStore(stats.NewNullSink(), false) - sm := stats2.NewMockStatManager(statsStore) + statsStore := gostats.NewStore(gostats.NewNullSink(), false) + sm := stats.NewMockStatManager(statsStore) cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm) // Test Near Limit Stats. 
Under Near Limit Ratio @@ -427,8 +427,8 @@ func TestRedisWithJitter(t *testing.T) { client := mock_redis.NewMockClient(controller) timeSource := mock_utils.NewMockTimeSource(controller) jitterSource := mock_utils.NewMockJitterRandSource(controller) - statsStore := stats.NewStore(stats.NewNullSink(), false) - sm := stats2.NewMockStatManager(statsStore) + statsStore := gostats.NewStore(gostats.NewNullSink(), false) + sm := stats.NewMockStatManager(statsStore) cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm) timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go index 47271684..a0d7bef6 100644 --- a/test/service/ratelimit_legacy_test.go +++ b/test/service/ratelimit_legacy_test.go @@ -1,7 +1,7 @@ package ratelimit_test import ( - stats2 "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/stats" "testing" core_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" @@ -80,7 +80,7 @@ func TestServiceLegacy(test *testing.T) { barrier := newBarrier() t.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats2.Manager) { barrier.signal() }).Return(t.config) + func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config) t.runtimeUpdateCallback <- 1 barrier.wait() @@ -120,7 +120,7 @@ func TestServiceLegacy(test *testing.T) { // Config load failure. t.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats2.Manager) { + func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() panic(config.RateLimitConfigError("load error")) }) @@ -218,7 +218,7 @@ func TestInitialLoadErrorLegacy(test *testing.T) { t.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1) t.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats2.Manager) { + func([]config.RateLimitConfigToLoad, stats.Manager) { panic(config.RateLimitConfigError("load error")) }) service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.sm, true) diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 7d5fbad9..49def355 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -1,7 +1,7 @@ package ratelimit_test import ( - stats2 "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/stats" "sync" "testing" @@ -16,7 +16,7 @@ import ( mock_snapshot "github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot" stats3 "github.com/envoyproxy/ratelimit/test/mocks/stats" "github.com/golang/mock/gomock" - stats "github.com/lyft/gostats" + gostats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -57,8 +57,8 @@ type rateLimitServiceTestSuite struct { configLoader *mock_config.MockRateLimitConfigLoader config *mock_config.MockRateLimitConfig runtimeUpdateCallback chan<- int - sm stats2.Manager - store stats.Store + sm stats.Manager + store gostats.Store } func commonSetup(t *testing.T) rateLimitServiceTestSuite { @@ -70,7 +70,7 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite { ret.cache = mock_limiter.NewMockRateLimitCache(ret.controller) 
ret.configLoader = mock_config.NewMockRateLimitConfigLoader(ret.controller) ret.config = mock_config.NewMockRateLimitConfig(ret.controller) - ret.store = stats.NewStore(stats.NewNullSink(), false) + ret.store = gostats.NewStore(gostats.NewNullSink(), false) ret.sm = stats3.NewMockStatManager(ret.store) return ret } @@ -113,7 +113,7 @@ func TestService(test *testing.T) { barrier := newBarrier() t.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats2.Manager) { barrier.signal() }).Return(t.config) + func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config) t.runtimeUpdateCallback <- 1 barrier.wait() @@ -143,7 +143,7 @@ func TestService(test *testing.T) { // Config load failure. t.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats2.Manager) { + func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() panic(config.RateLimitConfigError("load error")) }) @@ -229,7 +229,7 @@ func TestInitialLoadError(test *testing.T) { t.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1) t.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats2.Manager) { + func([]config.RateLimitConfigToLoad, stats.Manager) { panic(config.RateLimitConfigError("load error")) }) service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.sm, true) From 848093631785d2cdf434f5dba01d41a7e5a26c4b Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Mon, 22 Feb 2021 16:26:05 -0300 Subject: [PATCH 14/26] added logging --- src/stats/manager_impl.go | 1 + 1 file changed, 1 insertion(+) diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index c22cfc7c..ac178e3a 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -8,6 +8,7 @@ import ( ) func NewStatManager(store gostats.Store, s settings.Settings) *ManagerImpl { + logger.Warnf("Initializing Stat Manager with detailed metrics %t", s.DetailedMetrics) serviceScope := store.ScopeWithTags("ratelimit", s.ExtraTags).Scope("service") return &ManagerImpl{ store: store, From 1e7bbe9adade5865eb022790a34bd22bc01289c6 Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Tue, 23 Feb 2021 09:42:45 -0300 Subject: [PATCH 15/26] use descriptor key instead of cache key --- src/limiter/base_limiter.go | 42 ++++++++++++++++++------------------- src/memcached/cache_impl.go | 4 +++- src/stats/manager_impl.go | 2 +- 3 files changed, 25 insertions(+), 23 deletions(-) diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index 0ec9d08b..d3d20c69 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -49,7 +49,7 @@ func (this *BaseRateLimiter) GenerateCacheKeys(request *pb.RateLimitRequest, cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey(request.Domain, request.Descriptors[i], limits[i], now) // Increase statistics for limits hit by their respective requests. 
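		// The key handed to the stat manager changes below: a cache key carries
		// the time-slot suffix (e.g. "domain_key4_value4_997200" in the redis
		// tests), so detailed metrics keyed by it would mint a fresh counter
		// every unit of time, while stats.DescriptorKey yields the stable
		// "domain.key_value" form.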
if limits[i] != nil { - this.Manager.AddTotalHits(uint64(hitsAddend), limits[i].Stats, cacheKeys[i].Key) + this.Manager.AddTotalHits(uint64(hitsAddend), limits[i].Stats, stats.DescriptorKey(request.Domain, request.Descriptors[i])) } } return cacheKeys @@ -69,15 +69,15 @@ func (this *BaseRateLimiter) IsOverLimitWithLocalCache(key string) bool { // Generates response descriptor status based on cache key, over the limit with local cache, over the limit and // near the limit thresholds. Thresholds are checked in order and are mutually exclusive. -func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *LimitInfo, - isOverLimitWithLocalCache bool, hitsAddend uint32) *pb.RateLimitResponse_DescriptorStatus { - if key == "" { +func (this *BaseRateLimiter) GetResponseDescriptorStatus(localCacheKey string, limitInfo *LimitInfo, + isOverLimitWithLocalCache bool, hitsAddend uint32, descriptorKey string) *pb.RateLimitResponse_DescriptorStatus { + if localCacheKey == "" { return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK, nil, 0) } if isOverLimitWithLocalCache { - this.Manager.AddOverLimit(uint64(hitsAddend), limitInfo.limit.Stats, key) - this.Manager.AddOverLimitWithLocalCache(uint64(hitsAddend), limitInfo.limit.Stats, key) + this.Manager.AddOverLimit(uint64(hitsAddend), limitInfo.limit.Stats, descriptorKey) + this.Manager.AddOverLimitWithLocalCache(uint64(hitsAddend), limitInfo.limit.Stats, descriptorKey) return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, limitInfo.limit.Limit, 0) } @@ -86,24 +86,24 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * // The nearLimitThreshold is the number of requests that can be made before hitting the nearLimitRatio. // We need to know it in both the OK and OVER_LIMIT scenarios. limitInfo.nearLimitThreshold = uint32(math.Floor(float64(float32(limitInfo.overLimitThreshold) * this.nearLimitRatio))) - logger.Debugf("cache key: %s current: %d", key, limitInfo.limitAfterIncrease) + logger.Debugf("cache localCacheKey: %s current: %d", localCacheKey, limitInfo.limitAfterIncrease) if limitInfo.limitAfterIncrease > limitInfo.overLimitThreshold { responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, limitInfo.limit.Limit, 0) - this.checkOverLimitThreshold(limitInfo, hitsAddend, key) + this.checkOverLimitThreshold(limitInfo, hitsAddend, descriptorKey) if this.localCache != nil { // Set the TTL of the local_cache to be the entire duration. // Since the cache_key gets changed once the time crosses over current time slot, the over-the-limit // cache keys in local_cache lose effectiveness. - // For example, if we have an hour limit on all mongo connections, the cache key would be - // similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache key is mongo_1h, we start + // For example, if we have an hour limit on all mongo connections, the cache localCacheKey would be + // similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache localCacheKey is mongo_1h, we start // to get ratelimited in the 50th minute, the ttl of local_cache will be set as 1 hour(0h50m-1h49m). - // In the time of 1h1m, since the cache key becomes different (mongo_2h), it won't get ratelimited. - err := this.localCache.Set([]byte(key), []byte{}, int(utils.UnitToDivider(limitInfo.limit.Limit.Unit))) + // In the time of 1h1m, since the cache localCacheKey becomes different (mongo_2h), it won't get ratelimited. 
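			// UnitToDivider maps SECOND/MINUTE/HOUR/DAY to 1/60/3600/86400, and
			// freecache takes that value as the entry's expiry in seconds, so the
			// sentinel set below lives for exactly one full unit.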
+ err := this.localCache.Set([]byte(localCacheKey), []byte{}, int(utils.UnitToDivider(limitInfo.limit.Limit.Unit))) if err != nil { - logger.Errorf("Failing to set local cache key: %s", key) + logger.Errorf("Failing to set local cache localCacheKey: %s", localCacheKey) } } } else { @@ -111,7 +111,7 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * limitInfo.limit.Limit, limitInfo.overLimitThreshold-limitInfo.limitAfterIncrease) // The limit is OK but we additionally want to know if we are near the limit. - this.checkNearLimitThreshold(limitInfo, hitsAddend, key) + this.checkNearLimitThreshold(limitInfo, hitsAddend, descriptorKey) } return responseDescriptorStatus } @@ -129,37 +129,37 @@ func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expira } } -func (this *BaseRateLimiter) checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32, key string) { +func (this *BaseRateLimiter) checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32, descriptorKey string) { // Increase over limit statistics. Because we support += behavior for increasing the limit, we need to // assess if the entire hitsAddend were over the limit. That is, if the limit's value before adding the // N hits was over the limit, then all the N hits were over limit. // Otherwise, only the difference between the current limit value and the over limit threshold // were over limit hits. if limitInfo.limitBeforeIncrease >= limitInfo.overLimitThreshold { - this.Manager.AddOverLimit(uint64(hitsAddend), limitInfo.limit.Stats, key) + this.Manager.AddOverLimit(uint64(hitsAddend), limitInfo.limit.Stats, descriptorKey) } else { - this.Manager.AddOverLimit(uint64(limitInfo.limitAfterIncrease-limitInfo.overLimitThreshold), limitInfo.limit.Stats, key) + this.Manager.AddOverLimit(uint64(limitInfo.limitAfterIncrease-limitInfo.overLimitThreshold), limitInfo.limit.Stats, descriptorKey) // If the limit before increase was below the over limit value, then some of the hits were // in the near limit range. this.Manager.AddNearLimit( uint64(limitInfo.overLimitThreshold-utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)), limitInfo.limit.Stats, - key, + descriptorKey, ) } } -func (this *BaseRateLimiter) checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32, key string) { +func (this *BaseRateLimiter) checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32, descriptorKey string) { if limitInfo.limitAfterIncrease > limitInfo.nearLimitThreshold { // Here we also need to assess which portion of the hitsAddend were in the near limit range. // If all the hits were over the nearLimitThreshold, then all hits are near limit. Otherwise, // only the difference between the current limit value and the near limit threshold were near // limit hits. 
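		// Worked example: limit 10 with nearLimitRatio 0.8 gives
		// nearLimitThreshold 8. A request with hitsAddend 2 that moves the
		// counter from 7 to 9 takes the else branch and records 9 - 8 = 1
		// near-limit hit; moving from 8 to 10, both hits count.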
if limitInfo.limitBeforeIncrease >= limitInfo.nearLimitThreshold { - this.Manager.AddNearLimit(uint64(hitsAddend), limitInfo.limit.Stats, key) + this.Manager.AddNearLimit(uint64(hitsAddend), limitInfo.limit.Stats, descriptorKey) } else { - this.Manager.AddNearLimit(uint64(limitInfo.limitAfterIncrease-limitInfo.nearLimitThreshold), limitInfo.limit.Stats, key) + this.Manager.AddNearLimit(uint64(limitInfo.limitAfterIncrease-limitInfo.nearLimitThreshold), limitInfo.limit.Stats, descriptorKey) } } } diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 78be2171..ebc48ea1 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -66,6 +66,7 @@ func (this *rateLimitMemcacheImpl) DoLimit( cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddend) isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) + descriptorKeys := make([]string, len(request.Descriptors)) keysToGet := make([]string, 0, len(request.Descriptors)) @@ -73,6 +74,7 @@ func (this *rateLimitMemcacheImpl) DoLimit( if cacheKey.Key == "" { continue } + descriptorKeys[i] = stats.DescriptorKey(request.Domain, request.Descriptors[i]) // Check if key is over the limit in local cache. if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { @@ -118,7 +120,7 @@ func (this *rateLimitMemcacheImpl) DoLimit( limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) responseDescriptorStatuses[i] = this.baseRateLimiter.GetResponseDescriptorStatus(cacheKey.Key, - limitInfo, isOverLimitWithLocalCache[i], hitsAddend) + limitInfo, isOverLimitWithLocalCache[i], hitsAddend, descriptorKeys[i]) } this.waitGroup.Add(1) diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index ac178e3a..a15f6f36 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -8,7 +8,7 @@ import ( ) func NewStatManager(store gostats.Store, s settings.Settings) *ManagerImpl { - logger.Warnf("Initializing Stat Manager with detailed metrics %t", s.DetailedMetrics) + logger.Infof("Initializing Stat Manager with detailed metrics %t", s.DetailedMetrics) serviceScope := store.ScopeWithTags("ratelimit", s.ExtraTags).Scope("service") return &ManagerImpl{ store: store, From 10a0e09ad66a83804c027b11471e24204e084fed Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Tue, 23 Feb 2021 09:54:19 -0300 Subject: [PATCH 16/26] fix rate limiting tests --- src/memcached/cache_impl.go | 4 +--- src/redis/fixed_cache_impl.go | 2 +- test/limiter/base_limiter_test.go | 8 ++++---- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index ebc48ea1..2d3935d5 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -66,7 +66,6 @@ func (this *rateLimitMemcacheImpl) DoLimit( cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddend) isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) - descriptorKeys := make([]string, len(request.Descriptors)) keysToGet := make([]string, 0, len(request.Descriptors)) @@ -74,7 +73,6 @@ func (this *rateLimitMemcacheImpl) DoLimit( if cacheKey.Key == "" { continue } - descriptorKeys[i] = stats.DescriptorKey(request.Domain, request.Descriptors[i]) // Check if key is over the limit in local cache. 
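		// A hit here short-circuits the memcached round trip for that key: it is
		// never appended to keysToGet, and GetResponseDescriptorStatus later turns
		// the isOverLimitWithLocalCache flag into OverLimit plus
		// OverLimitWithLocalCache counter bumps.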
if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { @@ -120,7 +118,7 @@ func (this *rateLimitMemcacheImpl) DoLimit( limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) responseDescriptorStatuses[i] = this.baseRateLimiter.GetResponseDescriptorStatus(cacheKey.Key, - limitInfo, isOverLimitWithLocalCache[i], hitsAddend, descriptorKeys[i]) + limitInfo, isOverLimitWithLocalCache[i], hitsAddend, stats.DescriptorKey(request.Domain, request.Descriptors[i])) } this.waitGroup.Add(1) diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index 2a0d5f7f..625db829 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -97,7 +97,7 @@ func (this *fixedRateLimitCacheImpl) DoLimit( limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) responseDescriptorStatuses[i] = this.baseRateLimiter.GetResponseDescriptorStatus(cacheKey.Key, - limitInfo, isOverLimitWithLocalCache[i], hitsAddend) + limitInfo, isOverLimitWithLocalCache[i], hitsAddend, stats.DescriptorKey(request.Domain, request.Descriptors[i])) } diff --git a/test/limiter/base_limiter_test.go b/test/limiter/base_limiter_test.go index 29bbc185..904675f0 100644 --- a/test/limiter/base_limiter_test.go +++ b/test/limiter/base_limiter_test.go @@ -86,7 +86,7 @@ func TestGetResponseStatusEmptyKey(t *testing.T) { defer controller.Finish() sm := stats2.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)) baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "", sm) - responseStatus := baseRateLimit.GetResponseDescriptorStatus("", nil, false, 1) + responseStatus := baseRateLimit.GetResponseDescriptorStatus("", nil, false, 1, "") assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) assert.Equal(uint32(0), responseStatus.GetLimitRemaining()) } @@ -103,7 +103,7 @@ func TestGetResponseStatusOverLimitWithLocalCache(t *testing.T) { limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5) // As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits. 
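Patch 16 threads the descriptor key into GetResponseDescriptorStatus as a new final argument, computing it inline instead of via the precomputed slice from the previous patch; the existing tests pass an empty string because they only assert the aggregate counters. Below is a sketch of a test exercising a non-empty key, assuming the import aliases of base_limiter_test.go (`stats` for lyft/gostats, `stats2` for the stats mocks) and a MockTimeSource helper like the one the surrounding tests use:

```go
package limiter

import (
	"testing"

	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
	"github.com/golang/mock/gomock"
	stats "github.com/lyft/gostats"
	"github.com/stretchr/testify/assert"

	"github.com/envoyproxy/ratelimit/src/config"
	"github.com/envoyproxy/ratelimit/src/limiter"
	stats2 "github.com/envoyproxy/ratelimit/test/mocks/stats"
	mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils"
)

func TestGetResponseStatusWithDescriptorKey(t *testing.T) {
	controller := gomock.NewController(t)
	defer controller.Finish()
	timeSource := mock_utils.NewMockTimeSource(controller)
	timeSource.EXPECT().UnixNow().Return(int64(1)).MaxTimes(1)

	sm := stats2.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))
	baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm)
	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
	limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10)

	// The new final argument carries the descriptor key that the stat manager
	// uses for detailed metrics; an empty string opts the call out of them.
	responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1, "domain.key_value")
	assert.Equal(t, pb.RateLimitResponse_OK, responseStatus.GetCode())
	assert.Equal(t, uint32(4), responseStatus.GetLimitRemaining())
}
```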
- responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2) + responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2, "") assert.Equal(pb.RateLimitResponse_OVER_LIMIT, responseStatus.GetCode()) assert.Equal(uint32(0), responseStatus.GetLimitRemaining()) assert.Equal(limits[0].Limit, responseStatus.GetCurrentLimit()) @@ -123,7 +123,7 @@ func TestGetResponseStatusOverLimit(t *testing.T) { baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm) limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5) - responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) + responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1, "") assert.Equal(pb.RateLimitResponse_OVER_LIMIT, responseStatus.GetCode()) assert.Equal(uint32(0), responseStatus.GetLimitRemaining()) assert.Equal(limits[0].Limit, responseStatus.GetCurrentLimit()) @@ -145,7 +145,7 @@ func TestGetResponseStatusBelowLimit(t *testing.T) { baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10) - responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) + responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1, "") assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) assert.Equal(uint32(4), responseStatus.GetLimitRemaining()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) From 2544c4fefed3059ba541031a97042f1ba91f84f7 Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Tue, 23 Feb 2021 10:50:47 -0300 Subject: [PATCH 17/26] Revert modified comments --- src/limiter/base_limiter.go | 6 +++--- src/server/server_impl.go | 8 ++++---- src/service_cmd/runner/runner.go | 12 ++++++------ src/stats/manager_impl.go | 6 +++--- test/mocks/stats/manager.go | 2 +- test/redis/fixed_cache_impl_test.go | 6 +++--- 6 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index d3d20c69..dc24ab21 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -97,10 +97,10 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(localCacheKey string, l // Set the TTL of the local_cache to be the entire duration. // Since the cache_key gets changed once the time crosses over current time slot, the over-the-limit // cache keys in local_cache lose effectiveness. - // For example, if we have an hour limit on all mongo connections, the cache localCacheKey would be - // similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache localCacheKey is mongo_1h, we start + // For example, if we have an hour limit on all mongo connections, the cache key would be + // similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache key is mongo_1h, we start // to get ratelimited in the 50th minute, the ttl of local_cache will be set as 1 hour(0h50m-1h49m). - // In the time of 1h1m, since the cache localCacheKey becomes different (mongo_2h), it won't get ratelimited. + // In the time of 1h1m, since the cache key becomes different (mongo_2h), it won't get ratelimited. 
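The restored comment compresses two facts: cache keys embed the current time window, and the local-cache marker is written with the whole window as its TTL. A small standalone sketch using the numbers from the redis tests later in this section, with `unitSeconds` standing in for utils.UnitToDivider:

```go
package main

import "fmt"

// unitSeconds stands in for utils.UnitToDivider: the length of one rate limit
// window, in seconds, for a given unit.
func unitSeconds(unit string) int64 {
	switch unit {
	case "SECOND":
		return 1
	case "MINUTE":
		return 60
	case "HOUR":
		return 3600
	case "DAY":
		return 86400
	}
	return 0
}

func main() {
	// Mirrors the redis tests below: at UnixNow()=1000000, an hourly limit on
	// key4/value4 in "domain" yields the cache key domain_key4_value4_997200,
	// because 1000000 - (1000000 % 3600) = 997200. The over-limit marker in the
	// local cache is then written with the full window (3600s) as its TTL, so
	// the marker can never outlive the window key it guards.
	now, div := int64(1000000), unitSeconds("HOUR")
	windowStart := now - now%div
	fmt.Printf("cacheKey=domain_key4_value4_%d ttl=%ds\n", windowStart, div)
}
```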
err := this.localCache.Set([]byte(localCacheKey), []byte{}, int(utils.UnitToDivider(limitInfo.limit.Limit.Unit))) if err != nil { logger.Errorf("Failing to set local cache localCacheKey: %s", localCacheKey) diff --git a/src/server/server_impl.go b/src/server/server_impl.go index b9499b55..ba3abdfc 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -186,7 +186,7 @@ func newServer(s settings.Settings, name string, manager stats.Manager, localCac ret.grpcPort = s.GrpcPort ret.debugPort = s.DebugPort - // setup gostats + // setup stats ret.store = manager.GetStatsStore() ret.scope = ret.store.ScopeWithTags(name, s.ExtraTags) ret.store.AddStatGenerator(gostats.NewRuntimeStats(ret.scope.Scope("go"))) @@ -245,10 +245,10 @@ func newServer(s settings.Settings, name string, manager stats.Manager, localCac pprof.Profile(writer, request) }) - // setup gostats endpoint + // setup stats endpoint ret.AddDebugHttpEndpoint( - "/gostats", - "print out gostats", + "/stats", + "print out stats", func(writer http.ResponseWriter, request *http.Request) { expvar.Do(func(kv expvar.KeyValue) { io.WriteString(writer, fmt.Sprintf("%s: %s\n", kv.Key, kv.Value)) diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index c66e384d..e45ec392 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -1,7 +1,7 @@ package runner import ( - stats2 "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/stats" "io" "math/rand" "net/http" @@ -9,7 +9,7 @@ import ( "sync" "time" - stats "github.com/lyft/gostats" + gostats "github.com/lyft/gostats" "github.com/coocood/freecache" @@ -28,7 +28,7 @@ import ( ) type Runner struct { - manager stats2.Manager + manager stats.Manager settings settings.Settings srv server.Server mu sync.Mutex @@ -36,16 +36,16 @@ type Runner struct { func NewRunner(s settings.Settings) Runner { return Runner{ - manager: stats2.NewStatManager(stats.NewDefaultStore(), s), + manager: stats.NewStatManager(gostats.NewDefaultStore(), s), settings: s, } } -func (runner *Runner) GetStatsStore() stats.Store { +func (runner *Runner) GetStatsStore() gostats.Store { return runner.manager.GetStatsStore() } -func createLimiter(srv server.Server, s settings.Settings, localCache *freecache.Cache, manager stats2.Manager) limiter.RateLimitCache { +func createLimiter(srv server.Server, s settings.Settings, localCache *freecache.Cache, manager stats.Manager) limiter.RateLimitCache { switch s.BackendType { case "redis", "": return redis.NewRateLimiterCacheImplFromSettings( diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index a15f6f36..059499a9 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -63,8 +63,8 @@ func (this *ManagerImpl) AddOverLimitWithLocalCache(u uint64, rlStats RateLimitS } } -//todo: consider adding a ratelimitstats cache -//todo: add descriptor fields parameter to allow configuration of descriptor entries for which metrics will be emited. +//todo: consider adding a RateLimitStats cache +//todo: consider adding descriptor fields parameter to allow configuration of descriptor entries for which metrics will be emited. func (this *ManagerImpl) getDescriptorStat(key string) RateLimitStats { ret := this.NewStats(key) return ret @@ -141,7 +141,7 @@ func DescriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) str } // Stats for an individual rate limit config entry. 
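The runner change above is where the stat manager gets constructed in production code. A minimal sketch of standing one up outside the runner, assuming settings.NewSettings() populates its fields from the environment as the tests in this series do:

```go
package main

import (
	"fmt"

	gostats "github.com/lyft/gostats"

	"github.com/envoyproxy/ratelimit/src/settings"
	"github.com/envoyproxy/ratelimit/src/stats"
)

func main() {
	// Construct the manager the same way NewRunner does above.
	s := settings.NewSettings()
	manager := stats.NewStatManager(gostats.NewDefaultStore(), s)

	// Every config entry and descriptor now obtains its counters through the
	// manager instead of from a raw gostats scope.
	rl := manager.NewStats("mongo.database_users")
	rl.TotalHits.Inc()
	fmt.Println(rl.Key, rl.TotalHits.Value())
}
```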
-//todo: unexport fields +//todo: Ideally the gostats package fields should be unexported and iteracted with via getters and setters. type RateLimitStats struct { Key string TotalHits gostats.Counter diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go index 8e1d7b69..a505457e 100644 --- a/test/mocks/stats/manager.go +++ b/test/mocks/stats/manager.go @@ -39,7 +39,7 @@ func (m *MockStatManager) NewShouldRateLimitLegacyStats() stat.ShouldRateLimitLe } } -//todo: consider not using mock since pretty much all logic is repeated. +//todo: review mock implementation func (m *MockStatManager) NewStats(key string) stat.RateLimitStats { ret := stat.RateLimitStats{} logger.Debugf("outputing test stats %s", key) diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index a7d5de46..887d583c 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -136,7 +136,7 @@ func testLocalCacheStats(localCacheStats gostats.StatGenerator, statsStore gosta localCacheStats.GenerateStats() statsStore.Flush() - // Check whether all local_cache related gostats are available. + // Check whether all local_cache related stats are available. _, ok := sink.Record["averageAccessTime"] assert.Equal(t, true, ok) hitCount, ok := sink.Record["hitCount"] @@ -222,7 +222,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { // Check the local cache stats. testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) - // Test Over limit gostats + // Test Over limit stats timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), @@ -241,7 +241,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { // Check the local cache stats. testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) - // Test Over limit gostats with local cache + // Test Over limit stats with local cache timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).Times(0) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), From 704e4018af35186271063b063ee0616602c9aec4 Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Tue, 23 Feb 2021 10:51:54 -0300 Subject: [PATCH 18/26] improve ToDos --- src/service/ratelimit.go | 1 - src/stats/manager_impl.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index d520fcaf..00d421af 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -171,7 +171,6 @@ func (this *service) GetCurrentConfig() config.RateLimitConfig { return this.config } -//todo: add methods to interface func NewService(runtime loader.IFace, cache limiter.RateLimitCache, configLoader config.RateLimitConfigLoader, manager stats.Manager, runtimeWatchRoot bool) RateLimitServiceServer { diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index 059499a9..647b9488 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -141,7 +141,7 @@ func DescriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) str } // Stats for an individual rate limit config entry. -//todo: Ideally the gostats package fields should be unexported and iteracted with via getters and setters. 
+//todo: Ideally the gostats package fields should be unexported and interacted with via getters and setters. type RateLimitStats struct { Key string TotalHits gostats.Counter From 048a03002e092940914d759d3255b92007cb7cf1 Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Tue, 23 Feb 2021 14:54:26 -0300 Subject: [PATCH 19/26] unit tests --- test/stats/detailedmetrics_test.go | 121 +++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) create mode 100644 test/stats/detailedmetrics_test.go diff --git a/test/stats/detailedmetrics_test.go b/test/stats/detailedmetrics_test.go new file mode 100644 index 00000000..02d1014c --- /dev/null +++ b/test/stats/detailedmetrics_test.go @@ -0,0 +1,121 @@ +package stats + +import ( + "github.com/envoyproxy/ratelimit/src/config" + ratelimit "github.com/envoyproxy/ratelimit/src/service" + settings "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/stats" + mock_config "github.com/envoyproxy/ratelimit/test/mocks/config" + mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" + mock_loader "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader" + mock_snapshot "github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot" + "github.com/golang/mock/gomock" + gostats "github.com/lyft/gostats" + "github.com/stretchr/testify/assert" + "testing" +) + +func commonSetup(t *testing.T) rateLimitServiceTestSuite { + ret := rateLimitServiceTestSuite{} + ret.assert = assert.New(t) + ret.controller = gomock.NewController(t) + ret.runtime = mock_loader.NewMockIFace(ret.controller) + ret.snapshot = mock_snapshot.NewMockIFace(ret.controller) + ret.cache = mock_limiter.NewMockRateLimitCache(ret.controller) + ret.configLoader = mock_config.NewMockRateLimitConfigLoader(ret.controller) + ret.config = mock_config.NewMockRateLimitConfig(ret.controller) + ret.store = gostats.NewStore(gostats.NewNullSink(), false) + sett := settings.NewSettings() + sett.DetailedMetrics = true + ret.sm = stats.NewStatManager(ret.store, sett) + return ret +} + + +type rateLimitServiceTestSuite struct { + assert *assert.Assertions + controller *gomock.Controller + runtime *mock_loader.MockIFace + snapshot *mock_snapshot.MockIFace + cache *mock_limiter.MockRateLimitCache + configLoader *mock_config.MockRateLimitConfigLoader + config *mock_config.MockRateLimitConfig + runtimeUpdateCallback chan<- int + sm stats.Manager + store gostats.Store +} + +func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitServiceServer { + this.runtime.EXPECT().AddUpdateCallback(gomock.Any()).Do( + func(callback chan<- int) { + this.runtimeUpdateCallback = callback + }) + this.runtime.EXPECT().Snapshot().Return(this.snapshot).MinTimes(1) + this.snapshot.EXPECT().Keys().Return([]string{"foo", "config.basic_config"}).MinTimes(1) + this.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1) + this.configLoader.EXPECT().Load( + []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, + gomock.Any()).Return(this.config) + return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.sm, true) +} + +func TestDetailedMetricsTotalHits(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + + key := "hello_world" + detailedKey1 := "hello_world_detailed1" + detailedKey2 := "hello_world_detailed2" + rlStats := t.sm.NewStats(key) + t.sm.AddTotalHits(11, rlStats, detailedKey1) + t.sm.AddTotalHits(22, rlStats, detailedKey2) + + assert.Equal(test, uint64(33), 
t.sm.NewStats(key).TotalHits.Value()) + assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).TotalHits.Value()) + assert.Equal(test, uint64(22), t.sm.NewStats(detailedKey2).TotalHits.Value()) +} +func TestDetailedMetricsNearLimit(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + + key := "hello_world" + detailedKey1 := "hello_world_detailed1" + detailedKey2 := "hello_world_detailed2" + rlStats := t.sm.NewStats(key) + t.sm.AddNearLimit(11, rlStats, detailedKey1) + t.sm.AddNearLimit(22, rlStats, detailedKey2) + + assert.Equal(test, uint64(33), t.sm.NewStats(key).NearLimit.Value()) + assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).NearLimit.Value()) + assert.Equal(test, uint64(22), t.sm.NewStats(detailedKey2).NearLimit.Value()) +} +func TestDetailedMetricsOverLimit(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + + key := "hello_world" + detailedKey1 := "hello_world_detailed1" + detailedKey2 := "hello_world_detailed2" + rlStats := t.sm.NewStats(key) + t.sm.AddOverLimit(11, rlStats, detailedKey1) + t.sm.AddOverLimit(22, rlStats, detailedKey2) + + assert.Equal(test, uint64(33), t.sm.NewStats(key).OverLimit.Value()) + assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).OverLimit.Value()) + assert.Equal(test, uint64(22), t.sm.NewStats(detailedKey2).OverLimit.Value()) +} +func TestDetailedMetricsOverLimitWithLocalCache(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + + key := "hello_world" + detailedKey1 := "hello_world_detailed1" + detailedKey2 := "hello_world_detailed2" + rlStats := t.sm.NewStats(key) + t.sm.AddOverLimitWithLocalCache(11, rlStats, detailedKey1) + t.sm.AddOverLimitWithLocalCache(22, rlStats, detailedKey2) + + assert.Equal(test, uint64(33), t.sm.NewStats(key).OverLimitWithLocalCache.Value()) + assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).OverLimitWithLocalCache.Value()) + assert.Equal(test, uint64(22), t.sm.NewStats(detailedKey2).OverLimitWithLocalCache.Value()) +} From 71ac855b0fe9ee994e9eed988eb6cebe8968acda Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Tue, 23 Feb 2021 14:54:50 -0300 Subject: [PATCH 20/26] go fmt --- test/stats/detailedmetrics_test.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/test/stats/detailedmetrics_test.go b/test/stats/detailedmetrics_test.go index 02d1014c..2cd3eca2 100644 --- a/test/stats/detailedmetrics_test.go +++ b/test/stats/detailedmetrics_test.go @@ -31,7 +31,6 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite { return ret } - type rateLimitServiceTestSuite struct { assert *assert.Assertions controller *gomock.Controller @@ -71,8 +70,8 @@ func TestDetailedMetricsTotalHits(test *testing.T) { t.sm.AddTotalHits(22, rlStats, detailedKey2) assert.Equal(test, uint64(33), t.sm.NewStats(key).TotalHits.Value()) - assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).TotalHits.Value()) - assert.Equal(test, uint64(22), t.sm.NewStats(detailedKey2).TotalHits.Value()) + assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).TotalHits.Value()) + assert.Equal(test, uint64(22), t.sm.NewStats(detailedKey2).TotalHits.Value()) } func TestDetailedMetricsNearLimit(test *testing.T) { t := commonSetup(test) @@ -86,8 +85,8 @@ func TestDetailedMetricsNearLimit(test *testing.T) { t.sm.AddNearLimit(22, rlStats, detailedKey2) assert.Equal(test, uint64(33), t.sm.NewStats(key).NearLimit.Value()) - assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).NearLimit.Value()) - assert.Equal(test, 
uint64(22), t.sm.NewStats(detailedKey2).NearLimit.Value()) + assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).NearLimit.Value()) + assert.Equal(test, uint64(22), t.sm.NewStats(detailedKey2).NearLimit.Value()) } func TestDetailedMetricsOverLimit(test *testing.T) { t := commonSetup(test) @@ -101,8 +100,8 @@ func TestDetailedMetricsOverLimit(test *testing.T) { t.sm.AddOverLimit(22, rlStats, detailedKey2) assert.Equal(test, uint64(33), t.sm.NewStats(key).OverLimit.Value()) - assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).OverLimit.Value()) - assert.Equal(test, uint64(22), t.sm.NewStats(detailedKey2).OverLimit.Value()) + assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).OverLimit.Value()) + assert.Equal(test, uint64(22), t.sm.NewStats(detailedKey2).OverLimit.Value()) } func TestDetailedMetricsOverLimitWithLocalCache(test *testing.T) { t := commonSetup(test) @@ -116,6 +115,6 @@ func TestDetailedMetricsOverLimitWithLocalCache(test *testing.T) { t.sm.AddOverLimitWithLocalCache(22, rlStats, detailedKey2) assert.Equal(test, uint64(33), t.sm.NewStats(key).OverLimitWithLocalCache.Value()) - assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).OverLimitWithLocalCache.Value()) - assert.Equal(test, uint64(22), t.sm.NewStats(detailedKey2).OverLimitWithLocalCache.Value()) + assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).OverLimitWithLocalCache.Value()) + assert.Equal(test, uint64(22), t.sm.NewStats(detailedKey2).OverLimitWithLocalCache.Value()) } From e9815ab92aff94dc608afdbf7263fd657b57761e Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Wed, 24 Feb 2021 09:51:13 -0300 Subject: [PATCH 21/26] Fix multiple additions to same metric --- src/stats/manager_impl.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index 647b9488..0d846f78 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -33,7 +33,7 @@ func (this *ManagerImpl) GetStatsStore() gostats.Store { func (this *ManagerImpl) AddTotalHits(u uint64, rlStats RateLimitStats, key string) { rlStats.TotalHits.Add(u) - if this.detailed { + if this.detailed && key != rlStats.Key { stat := this.getDescriptorStat(key) stat.TotalHits.Add(u) } @@ -41,7 +41,7 @@ func (this *ManagerImpl) AddTotalHits(u uint64, rlStats RateLimitStats, key stri func (this *ManagerImpl) AddOverLimit(u uint64, rlStats RateLimitStats, key string) { rlStats.OverLimit.Add(u) - if this.detailed { + if this.detailed && key != rlStats.Key { stat := this.getDescriptorStat(key) stat.OverLimit.Add(u) } @@ -49,7 +49,7 @@ func (this *ManagerImpl) AddOverLimit(u uint64, rlStats RateLimitStats, key stri func (this *ManagerImpl) AddNearLimit(u uint64, rlStats RateLimitStats, key string) { rlStats.NearLimit.Add(u) - if this.detailed { + if this.detailed && key != rlStats.Key { stat := this.getDescriptorStat(key) stat.NearLimit.Add(u) } @@ -57,7 +57,7 @@ func (this *ManagerImpl) AddNearLimit(u uint64, rlStats RateLimitStats, key stri func (this *ManagerImpl) AddOverLimitWithLocalCache(u uint64, rlStats RateLimitStats, key string) { rlStats.OverLimitWithLocalCache.Add(u) - if this.detailed { + if this.detailed && key != rlStats.Key { stat := this.getDescriptorStat(key) stat.OverLimitWithLocalCache.Add(u) } @@ -141,7 +141,9 @@ func DescriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) str } // Stats for an individual rate limit config entry. 
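The `key != rlStats.Key` guard added in this patch works around counter aliasing: until the detailed metrics move into their own scope two patches later, aggregate and detailed counters are created in the same scope, and gostats hands back one counter per fully qualified name (which is what the double counting named in the commit subject implies). A sketch of the failure mode:

```go
package main

import (
	"fmt"

	gostats "github.com/lyft/gostats"
)

func main() {
	// Before patch 23, aggregate and detailed counters were created in the
	// same "rate_limit" scope. If the descriptor key equals the config entry
	// key, both NewCounter calls resolve to one counter, so a single logical
	// hit is counted twice; that is what the guard above prevents.
	store := gostats.NewStore(gostats.NewNullSink(), false)
	scope := store.Scope("rate_limit")
	aggregate := scope.NewCounter("key_value.total_hits")
	detailed := scope.NewCounter("key_value.total_hits") // same name, same counter
	aggregate.Add(1)
	detailed.Add(1)
	fmt.Println(aggregate.Value()) // 2, not 1
}
```

The final patch in this series removes the guard again, since a dedicated detailed scope makes the collision impossible.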
-//todo: Ideally the gostats package fields should be unexported and interacted with via getters and setters. +//todo: Ideally the gostats package fields should be unexported +// the inner value could be interacted with via getters such as rlStats.TotalHits() uint64 +// This ensures that setters such as Inc() and Add() can only be managed by ManagerImpl. type RateLimitStats struct { Key string TotalHits gostats.Counter From 3aaf84261f94f5913f93f39e96a550ed873e89de Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Wed, 24 Feb 2021 09:56:06 -0300 Subject: [PATCH 22/26] Fix outdated method doc --- src/config/config_impl.go | 5 ++--- src/stats/manager_impl.go | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 491cdfc8..6f1c67ba 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -56,8 +56,7 @@ var validKeys = map[string]bool{ // Create a new rate limit config entry. // @param requestsPerUnit supplies the requests per unit of time for the entry. // @param unit supplies the unit of time for the entry. -// @param key supplies the fully resolved key name of the entry. -// @param scope supplies the owning scope. +// @param rlStats supplies the stats structure associated with the RateLimit // @return the new config entry. func NewRateLimit( requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats) *RateLimit { @@ -90,7 +89,7 @@ func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimit // @param config supplies the config file that owns the descriptor. // @param parentKey supplies the fully resolved key name that owns this config level. // @param descriptors supplies the YAML descriptors to load. -// @param statsScope supplies the owning scope. +// @param manager that owns the stats.Scope. func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor, manager stats.Manager) { for _, descriptorConfig := range descriptors { diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index 0d846f78..fe98ca6c 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -71,7 +71,6 @@ func (this *ManagerImpl) getDescriptorStat(key string) RateLimitStats { } // Create new rate descriptor stats for a descriptor tuple. -// @param statsScope supplies the owning store. // @param key supplies the fully resolved descriptor tuple. // @return new stats. 
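Returning to the todo expanded in patch 21: for illustration only, one way the proposed accessor design could look. The type is lowercased here so the sketch does not clash with the real RateLimitStats:

```go
package stats

import gostats "github.com/lyft/gostats"

// rateLimitStats sketches the accessor design the todo proposes: counters stay
// unexported, readers get value-only getters, and only code in this package
// (such as ManagerImpl's Add* methods) can increment them.
type rateLimitStats struct {
	key       string
	totalHits gostats.Counter
	// overLimit, nearLimit, overLimitWithLocalCache elided for brevity
}

// TotalHits exposes the counter's value without exposing Add or Inc.
func (s rateLimitStats) TotalHits() uint64 { return s.totalHits.Value() }
```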
func (this *ManagerImpl) NewStats(key string) RateLimitStats { From 05db202cc6ef3ccb4c5f76b68816f73facc4e56b Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Mon, 1 Mar 2021 11:27:43 -0300 Subject: [PATCH 23/26] detailed metrics in different scope --- src/stats/manager.go | 1 + src/stats/manager_impl.go | 19 +++++++++++++++++-- test/mocks/stats/manager.go | 4 ++++ test/stats/detailedmetrics_test.go | 16 ++++++++-------- 4 files changed, 30 insertions(+), 10 deletions(-) diff --git a/src/stats/manager.go b/src/stats/manager.go index 8dea48bc..c55bea05 100644 --- a/src/stats/manager.go +++ b/src/stats/manager.go @@ -12,4 +12,5 @@ type Manager interface { NewServiceStats() ServiceStats NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats GetStatsStore() stats.Store + NewDetailedStats(key string) RateLimitStats } diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index fe98ca6c..4325c8ef 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -15,6 +15,7 @@ func NewStatManager(store gostats.Store, s settings.Settings) *ManagerImpl { rlStatsScope: serviceScope.Scope("rate_limit"), legacyStatsScope: serviceScope.Scope("call.should_rate_limit_legacy"), serviceStatsScope: serviceScope, + detailedMetricsScope: serviceScope.Scope("rate_limit").Scope("detailed"), detailed: s.DetailedMetrics, } } @@ -24,6 +25,7 @@ type ManagerImpl struct { rlStatsScope gostats.Scope legacyStatsScope gostats.Scope serviceStatsScope gostats.Scope + detailedMetricsScope gostats.Scope detailed bool } @@ -66,7 +68,7 @@ func (this *ManagerImpl) AddOverLimitWithLocalCache(u uint64, rlStats RateLimitS //todo: consider adding a RateLimitStats cache //todo: consider adding descriptor fields parameter to allow configuration of descriptor entries for which metrics will be emited. func (this *ManagerImpl) getDescriptorStat(key string) RateLimitStats { - ret := this.NewStats(key) + ret := this.NewDetailedStats(key) return ret } @@ -75,7 +77,7 @@ func (this *ManagerImpl) getDescriptorStat(key string) RateLimitStats { // @return new stats. 
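With NewDetailedStats writing into serviceScope.Scope("rate_limit").Scope("detailed"), a descriptor key can no longer alias the aggregate counter of a config entry with the same name. Assuming gostats joins nested scope names with dots, and leaving ExtraTags aside, the two counters serialize under distinct names:

```go
package main

import (
	"fmt"

	gostats "github.com/lyft/gostats"
)

func main() {
	// Mirrors the scoping in NewStatManager above. The same descriptor key now
	// yields two distinct counters, e.g.
	//   ratelimit.service.rate_limit.key_value.total_hits          (aggregate)
	//   ratelimit.service.rate_limit.detailed.key_value.total_hits (detailed)
	store := gostats.NewStore(gostats.NewNullSink(), false)
	serviceScope := store.Scope("ratelimit").Scope("service")
	rlScope := serviceScope.Scope("rate_limit")
	detailedScope := rlScope.Scope("detailed")

	aggregate := rlScope.NewCounter("key_value.total_hits")
	detailed := detailedScope.NewCounter("key_value.total_hits")
	aggregate.Add(1)
	detailed.Add(1)
	fmt.Println(aggregate.Value(), detailed.Value()) // 1 1: no shared counter
}
```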
func (this *ManagerImpl) NewStats(key string) RateLimitStats { ret := RateLimitStats{} - logger.Debugf("outputing test stats %s", key) + logger.Debugf("Creating stats for key: '%s'", key) ret.Key = key ret.TotalHits = this.rlStatsScope.NewCounter(key + ".total_hits") ret.OverLimit = this.rlStatsScope.NewCounter(key + ".over_limit") @@ -84,6 +86,18 @@ func (this *ManagerImpl) NewStats(key string) RateLimitStats { return ret } +func (this *ManagerImpl) NewDetailedStats(key string) RateLimitStats { + ret := RateLimitStats{} + logger.Debugf("Creating detailed stats for key: '%s'", key) + ret.Key = key + ret.TotalHits = this.detailedMetricsScope.NewCounter(key + ".total_hits") + ret.OverLimit = this.detailedMetricsScope.NewCounter(key + ".over_limit") + ret.NearLimit = this.detailedMetricsScope.NewCounter(key + ".near_limit") + ret.OverLimitWithLocalCache = this.detailedMetricsScope.NewCounter(key + ".over_limit_with_local_cache") + return ret + +} + type ShouldRateLimitLegacyStats struct { ReqConversionError gostats.Counter RespConversionError gostats.Counter @@ -125,6 +139,7 @@ func (this *ManagerImpl) NewServiceStats() ServiceStats { return ret } + func DescriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) string { rateLimitKey := "" for _, entry := range descriptor.Entries { diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go index a505457e..0c838368 100644 --- a/test/mocks/stats/manager.go +++ b/test/mocks/stats/manager.go @@ -67,6 +67,10 @@ func (this *MockStatManager) AddOverLimitWithLocalCache(u uint64, rlStats stat.R rlStats.OverLimitWithLocalCache.Add(u) } +func (this *MockStatManager) NewDetailedStats(key string) stat.RateLimitStats { + return this.NewStats(key) +} + func NewMockStatManager(store stats.Store) stat.Manager { return &MockStatManager{store: store} } diff --git a/test/stats/detailedmetrics_test.go b/test/stats/detailedmetrics_test.go index 2cd3eca2..7d23e8e1 100644 --- a/test/stats/detailedmetrics_test.go +++ b/test/stats/detailedmetrics_test.go @@ -70,8 +70,8 @@ func TestDetailedMetricsTotalHits(test *testing.T) { t.sm.AddTotalHits(22, rlStats, detailedKey2) assert.Equal(test, uint64(33), t.sm.NewStats(key).TotalHits.Value()) - assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).TotalHits.Value()) - assert.Equal(test, uint64(22), t.sm.NewStats(detailedKey2).TotalHits.Value()) + assert.Equal(test, uint64(11), t.sm.NewDetailedStats(detailedKey1).TotalHits.Value()) + assert.Equal(test, uint64(22), t.sm.NewDetailedStats(detailedKey2).TotalHits.Value()) } func TestDetailedMetricsNearLimit(test *testing.T) { t := commonSetup(test) @@ -85,8 +85,8 @@ func TestDetailedMetricsNearLimit(test *testing.T) { t.sm.AddNearLimit(22, rlStats, detailedKey2) assert.Equal(test, uint64(33), t.sm.NewStats(key).NearLimit.Value()) - assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).NearLimit.Value()) - assert.Equal(test, uint64(22), t.sm.NewStats(detailedKey2).NearLimit.Value()) + assert.Equal(test, uint64(11), t.sm.NewDetailedStats(detailedKey1).NearLimit.Value()) + assert.Equal(test, uint64(22), t.sm.NewDetailedStats(detailedKey2).NearLimit.Value()) } func TestDetailedMetricsOverLimit(test *testing.T) { t := commonSetup(test) @@ -100,8 +100,8 @@ func TestDetailedMetricsOverLimit(test *testing.T) { t.sm.AddOverLimit(22, rlStats, detailedKey2) assert.Equal(test, uint64(33), t.sm.NewStats(key).OverLimit.Value()) - assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).OverLimit.Value()) - assert.Equal(test, uint64(22), 
t.sm.NewStats(detailedKey2).OverLimit.Value()) + assert.Equal(test, uint64(11), t.sm.NewDetailedStats(detailedKey1).OverLimit.Value()) + assert.Equal(test, uint64(22), t.sm.NewDetailedStats(detailedKey2).OverLimit.Value()) } func TestDetailedMetricsOverLimitWithLocalCache(test *testing.T) { t := commonSetup(test) @@ -115,6 +115,6 @@ func TestDetailedMetricsOverLimitWithLocalCache(test *testing.T) { t.sm.AddOverLimitWithLocalCache(22, rlStats, detailedKey2) assert.Equal(test, uint64(33), t.sm.NewStats(key).OverLimitWithLocalCache.Value()) - assert.Equal(test, uint64(11), t.sm.NewStats(detailedKey1).OverLimitWithLocalCache.Value()) - assert.Equal(test, uint64(22), t.sm.NewStats(detailedKey2).OverLimitWithLocalCache.Value()) + assert.Equal(test, uint64(11), t.sm.NewDetailedStats(detailedKey1).OverLimitWithLocalCache.Value()) + assert.Equal(test, uint64(22), t.sm.NewDetailedStats(detailedKey2).OverLimitWithLocalCache.Value()) } From 23c136638ae609f761709779104bd124dd8a0016 Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Mon, 1 Mar 2021 11:31:36 -0300 Subject: [PATCH 24/26] go fmt --- src/stats/manager_impl.go | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index 4325c8ef..6ad86bc4 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -11,22 +11,22 @@ func NewStatManager(store gostats.Store, s settings.Settings) *ManagerImpl { logger.Infof("Initializing Stat Manager with detailed metrics %t", s.DetailedMetrics) serviceScope := store.ScopeWithTags("ratelimit", s.ExtraTags).Scope("service") return &ManagerImpl{ - store: store, - rlStatsScope: serviceScope.Scope("rate_limit"), - legacyStatsScope: serviceScope.Scope("call.should_rate_limit_legacy"), - serviceStatsScope: serviceScope, + store: store, + rlStatsScope: serviceScope.Scope("rate_limit"), + legacyStatsScope: serviceScope.Scope("call.should_rate_limit_legacy"), + serviceStatsScope: serviceScope, detailedMetricsScope: serviceScope.Scope("rate_limit").Scope("detailed"), - detailed: s.DetailedMetrics, + detailed: s.DetailedMetrics, } } type ManagerImpl struct { - store gostats.Store - rlStatsScope gostats.Scope - legacyStatsScope gostats.Scope - serviceStatsScope gostats.Scope + store gostats.Store + rlStatsScope gostats.Scope + legacyStatsScope gostats.Scope + serviceStatsScope gostats.Scope detailedMetricsScope gostats.Scope - detailed bool + detailed bool } func (this *ManagerImpl) GetStatsStore() gostats.Store { @@ -139,7 +139,6 @@ func (this *ManagerImpl) NewServiceStats() ServiceStats { return ret } - func DescriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) string { rateLimitKey := "" for _, entry := range descriptor.Entries { From 3b793fd2f9a72ec8256e8212f8979a016aa43cc0 Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Mon, 1 Mar 2021 11:41:31 -0300 Subject: [PATCH 25/26] Detailed Metrics turned off test --- test/stats/detailedmetrics_test.go | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/test/stats/detailedmetrics_test.go b/test/stats/detailedmetrics_test.go index 7d23e8e1..bc7ef30a 100644 --- a/test/stats/detailedmetrics_test.go +++ b/test/stats/detailedmetrics_test.go @@ -15,7 +15,7 @@ import ( "testing" ) -func commonSetup(t *testing.T) rateLimitServiceTestSuite { +func commonSetup(t *testing.T, detailedMetrics bool) rateLimitServiceTestSuite { ret := rateLimitServiceTestSuite{} ret.assert = assert.New(t) ret.controller = gomock.NewController(t) @@ 
-26,7 +26,7 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite { ret.config = mock_config.NewMockRateLimitConfig(ret.controller) ret.store = gostats.NewStore(gostats.NewNullSink(), false) sett := settings.NewSettings() - sett.DetailedMetrics = true + sett.DetailedMetrics = detailedMetrics ret.sm = stats.NewStatManager(ret.store, sett) return ret } @@ -59,7 +59,7 @@ func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitSe } func TestDetailedMetricsTotalHits(test *testing.T) { - t := commonSetup(test) + t := commonSetup(test, true) defer t.controller.Finish() key := "hello_world" @@ -74,7 +74,7 @@ func TestDetailedMetricsTotalHits(test *testing.T) { assert.Equal(test, uint64(22), t.sm.NewDetailedStats(detailedKey2).TotalHits.Value()) } func TestDetailedMetricsNearLimit(test *testing.T) { - t := commonSetup(test) + t := commonSetup(test, true) defer t.controller.Finish() key := "hello_world" @@ -89,7 +89,7 @@ func TestDetailedMetricsNearLimit(test *testing.T) { assert.Equal(test, uint64(22), t.sm.NewDetailedStats(detailedKey2).NearLimit.Value()) } func TestDetailedMetricsOverLimit(test *testing.T) { - t := commonSetup(test) + t := commonSetup(test, true) defer t.controller.Finish() key := "hello_world" @@ -104,7 +104,7 @@ func TestDetailedMetricsOverLimit(test *testing.T) { assert.Equal(test, uint64(22), t.sm.NewDetailedStats(detailedKey2).OverLimit.Value()) } func TestDetailedMetricsOverLimitWithLocalCache(test *testing.T) { - t := commonSetup(test) + t := commonSetup(test, true) defer t.controller.Finish() key := "hello_world" @@ -118,3 +118,18 @@ func TestDetailedMetricsOverLimitWithLocalCache(test *testing.T) { assert.Equal(test, uint64(11), t.sm.NewDetailedStats(detailedKey1).OverLimitWithLocalCache.Value()) assert.Equal(test, uint64(22), t.sm.NewDetailedStats(detailedKey2).OverLimitWithLocalCache.Value()) } +func TestDetailedMetricsTurnedOff(test *testing.T) { + t := commonSetup(test, false) + defer t.controller.Finish() + + key := "hello_world" + detailedKey1 := "hello_world_detailed1" + detailedKey2 := "hello_world_detailed2" + rlStats := t.sm.NewStats(key) + t.sm.AddOverLimitWithLocalCache(11, rlStats, detailedKey1) + t.sm.AddOverLimitWithLocalCache(22, rlStats, detailedKey2) + + assert.Equal(test, uint64(33), t.sm.NewStats(key).OverLimitWithLocalCache.Value()) + assert.Equal(test, uint64(0), t.sm.NewDetailedStats(detailedKey1).OverLimitWithLocalCache.Value()) + assert.Equal(test, uint64(0), t.sm.NewDetailedStats(detailedKey2).OverLimitWithLocalCache.Value()) +} From e859b6440a04210f501ac7e3589902014ede30b8 Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Mon, 1 Mar 2021 12:56:22 -0300 Subject: [PATCH 26/26] Detailed Metric Condition --- src/stats/manager_impl.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index 6ad86bc4..6270da5a 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -35,7 +35,7 @@ func (this *ManagerImpl) GetStatsStore() gostats.Store { func (this *ManagerImpl) AddTotalHits(u uint64, rlStats RateLimitStats, key string) { rlStats.TotalHits.Add(u) - if this.detailed && key != rlStats.Key { + if this.detailed { stat := this.getDescriptorStat(key) stat.TotalHits.Add(u) } @@ -43,7 +43,7 @@ func (this *ManagerImpl) AddTotalHits(u uint64, rlStats RateLimitStats, key stri func (this *ManagerImpl) AddOverLimit(u uint64, rlStats RateLimitStats, key string) { rlStats.OverLimit.Add(u) - if this.detailed && key != rlStats.Key { + if 
this.detailed { stat := this.getDescriptorStat(key) stat.OverLimit.Add(u) } @@ -51,7 +51,7 @@ func (this *ManagerImpl) AddOverLimit(u uint64, rlStats RateLimitStats, key stri func (this *ManagerImpl) AddNearLimit(u uint64, rlStats RateLimitStats, key string) { rlStats.NearLimit.Add(u) - if this.detailed && key != rlStats.Key { + if this.detailed { stat := this.getDescriptorStat(key) stat.NearLimit.Add(u) } @@ -59,7 +59,7 @@ func (this *ManagerImpl) AddNearLimit(u uint64, rlStats RateLimitStats, key stri func (this *ManagerImpl) AddOverLimitWithLocalCache(u uint64, rlStats RateLimitStats, key string) { rlStats.OverLimitWithLocalCache.Add(u) - if this.detailed && key != rlStats.Key { + if this.detailed { stat := this.getDescriptorStat(key) stat.OverLimitWithLocalCache.Add(u) }
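Taken together, the series makes per-descriptor metrics an opt-in controlled by settings.DetailedMetrics. A condensed end-to-end sketch in the style of detailedmetrics_test.go, exercising both values of the flag:

```go
package stats

import (
	"testing"

	gostats "github.com/lyft/gostats"
	"github.com/stretchr/testify/assert"

	"github.com/envoyproxy/ratelimit/src/settings"
	"github.com/envoyproxy/ratelimit/src/stats"
)

func TestDetailedMetricsEndToEnd(t *testing.T) {
	for _, detailed := range []bool{true, false} {
		sett := settings.NewSettings()
		sett.DetailedMetrics = detailed
		sm := stats.NewStatManager(gostats.NewStore(gostats.NewNullSink(), false), sett)

		rlStats := sm.NewStats("hello_world")
		sm.AddTotalHits(11, rlStats, "hello_world_detailed1")

		// The aggregate counter always moves.
		assert.Equal(t, uint64(11), sm.NewStats("hello_world").TotalHits.Value())
		// The per-descriptor counter only moves when the flag is on.
		expected := uint64(0)
		if detailed {
			expected = 11
		}
		assert.Equal(t, expected, sm.NewDetailedStats("hello_world_detailed1").TotalHits.Value())
	}
}
```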