Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Metric refactor #242

Merged
merged 20 commits into from
Jun 2, 2021
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 3 additions & 12 deletions src/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ package config
import (
pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
stats "github.com/lyft/gostats"
"github.com/envoyproxy/ratelimit/src/stats"
"golang.org/x/net/context"
)

Expand All @@ -14,19 +14,10 @@ func (e RateLimitConfigError) Error() string {
return string(e)
}

// Stats for an individual rate limit config entry.
type RateLimitStats struct {
TotalHits stats.Counter
OverLimit stats.Counter
NearLimit stats.Counter
OverLimitWithLocalCache stats.Counter
WithinLimit stats.Counter
}

// Wrapper for an individual rate limit config entry which includes the defined limit and stats.
type RateLimit struct {
FullKey string
Stats RateLimitStats
Stats stats.RateLimitStats
Limit *pb.RateLimitResponse_RateLimit
}

Expand Down Expand Up @@ -56,5 +47,5 @@ type RateLimitConfigLoader interface {
// @param statsScope supplies the stats scope to use for limit stats during runtime.
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: update function documentation with new parameter

// @return a new configuration.
// @throws RateLimitConfigError if the configuration could not be created.
Load(configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig
Load(configs []RateLimitConfigToLoad, manager stats.Manager) RateLimitConfig
}
68 changes: 17 additions & 51 deletions src/config/config_impl.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import (

pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
stats "github.com/lyft/gostats"
"github.com/envoyproxy/ratelimit/src/stats"
logger "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"gopkg.in/yaml.v2"
Expand Down Expand Up @@ -39,8 +39,8 @@ type rateLimitDomain struct {
}

type rateLimitConfigImpl struct {
domains map[string]*rateLimitDomain
statsScope stats.Scope
domains map[string]*rateLimitDomain
manager stats.Manager
}

var validKeys = map[string]bool{
Expand All @@ -53,30 +53,15 @@ var validKeys = map[string]bool{
"requests_per_unit": true,
}

// Create new rate limit stats for a config entry.
// @param statsScope supplies the owning scope.
// @param key supplies the fully resolved key name of the entry.
// @return new stats.
func newRateLimitStats(statsScope stats.Scope, key string) RateLimitStats {
ret := RateLimitStats{}
ret.TotalHits = statsScope.NewCounter(key + ".total_hits")
ret.OverLimit = statsScope.NewCounter(key + ".over_limit")
ret.NearLimit = statsScope.NewCounter(key + ".near_limit")
ret.OverLimitWithLocalCache = statsScope.NewCounter(key + ".over_limit_with_local_cache")
ret.WithinLimit = statsScope.NewCounter(key + ".within_limit")
return ret
}

// Create a new rate limit config entry.
// @param requestsPerUnit supplies the requests per unit of time for the entry.
// @param unit supplies the unit of time for the entry.
// @param key supplies the fully resolved key name of the entry.
// @param scope supplies the owning scope.
// @param rlStats supplies the stats structure associated with the RateLimit
// @return the new config entry.
func NewRateLimit(
requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, key string, scope stats.Scope) *RateLimit {
requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats) *RateLimit {

return &RateLimit{FullKey: key, Stats: newRateLimitStats(scope, key), Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}}
return &RateLimit{FullKey: rlStats.String(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}}
}

// Dump an individual descriptor for debugging purposes.
Expand Down Expand Up @@ -104,10 +89,8 @@ func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimit
// @param config supplies the config file that owns the descriptor.
// @param parentKey supplies the fully resolved key name that owns this config level.
// @param descriptors supplies the YAML descriptors to load.
// @param statsScope supplies the owning scope.
func (this *rateLimitDescriptor) loadDescriptors(
config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor,
statsScope stats.Scope) {
// @param manager that owns the stats.Scope.
func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor, manager stats.Manager) {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: rename arg name manager ->statsManager


for _, descriptorConfig := range descriptors {
if descriptorConfig.Key == "" {
Expand Down Expand Up @@ -138,8 +121,7 @@ func (this *rateLimitDescriptor) loadDescriptors(
}

rateLimit = NewRateLimit(
descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), newParentKey,
statsScope)
descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), manager.NewStats(newParentKey))
rateLimitDebugString = fmt.Sprintf(
" ratelimit={requests_per_unit=%d, unit=%s}", rateLimit.Limit.RequestsPerUnit,
rateLimit.Limit.Unit.String())
Expand All @@ -148,8 +130,7 @@ func (this *rateLimitDescriptor) loadDescriptors(
logger.Debugf(
"loading descriptor: key=%s%s", newParentKey, rateLimitDebugString)
newDescriptor := &rateLimitDescriptor{map[string]*rateLimitDescriptor{}, rateLimit}
newDescriptor.loadDescriptors(
config, newParentKey+".", descriptorConfig.Descriptors, statsScope)
newDescriptor.loadDescriptors(config, newParentKey+".", descriptorConfig.Descriptors, manager)
this.descriptors[finalKey] = newDescriptor
}
}
Expand Down Expand Up @@ -229,24 +210,10 @@ func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad) {

logger.Debugf("loading domain: %s", root.Domain)
newDomain := &rateLimitDomain{rateLimitDescriptor{map[string]*rateLimitDescriptor{}, nil}}
newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.statsScope)
newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.manager)
this.domains[root.Domain] = newDomain
}

func (this *rateLimitConfigImpl) descriptorToKey(descriptor *pb_struct.RateLimitDescriptor) string {
rateLimitKey := ""
for _, entry := range descriptor.Entries {
if rateLimitKey != "" {
rateLimitKey += "."
}
rateLimitKey += entry.Key
if entry.Value != "" {
rateLimitKey += "_" + entry.Value
}
}
return rateLimitKey
}

func (this *rateLimitConfigImpl) Dump() string {
ret := ""
for _, domain := range this.domains {
Expand All @@ -268,13 +235,12 @@ func (this *rateLimitConfigImpl) GetLimit(
}

if descriptor.GetLimit() != nil {
rateLimitKey := domain + "." + this.descriptorToKey(descriptor)
rateLimitKey := stats.DescriptorKey(domain, descriptor)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

what is the reasoning behind moving descriptorToKey function to stats package?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Moved to client package (where it is used currently).

rateLimitOverrideUnit := pb.RateLimitResponse_RateLimit_Unit(descriptor.GetLimit().GetUnit())
rateLimit = NewRateLimit(
descriptor.GetLimit().GetRequestsPerUnit(),
rateLimitOverrideUnit,
rateLimitKey,
this.statsScope)
this.manager.NewStats(rateLimitKey))
return rateLimit
}

Expand Down Expand Up @@ -316,9 +282,9 @@ func (this *rateLimitConfigImpl) GetLimit(
// @param stats supplies the stats scope to use for limit stats during runtime.
// @return a new config.
func NewRateLimitConfigImpl(
configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig {
configs []RateLimitConfigToLoad, manager stats.Manager) RateLimitConfig {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: same as above, rename manager to something like statsManager


ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsScope}
ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, manager}
for _, config := range configs {
ret.loadConfig(config)
}
Expand All @@ -329,9 +295,9 @@ func NewRateLimitConfigImpl(
type rateLimitConfigLoaderImpl struct{}

func (this *rateLimitConfigLoaderImpl) Load(
configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig {
configs []RateLimitConfigToLoad, manager stats.Manager) RateLimitConfig {

return NewRateLimitConfigImpl(configs, statsScope)
return NewRateLimitConfigImpl(configs, manager)
}

// @return a new default config loader implementation.
Expand Down
10 changes: 6 additions & 4 deletions src/config_check_cmd/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,14 @@ package main
import (
"flag"
"fmt"
"github.com/envoyproxy/ratelimit/src/settings"
"github.com/envoyproxy/ratelimit/src/stats"
"io/ioutil"
"os"
"path/filepath"

"github.com/envoyproxy/ratelimit/src/config"
"github.com/lyft/gostats"
gostats "github.com/lyft/gostats"
)

func loadConfigs(allConfigs []config.RateLimitConfigToLoad) {
Expand All @@ -19,9 +21,9 @@ func loadConfigs(allConfigs []config.RateLimitConfigToLoad) {
os.Exit(1)
}
}()

dummyStats := stats.NewStore(stats.NewNullSink(), false)
config.NewRateLimitConfigImpl(allConfigs, dummyStats)
settingStruct := settings.NewSettings()
manager := stats.NewStatManager(gostats.NewStore(gostats.NewNullSink(), false), settingStruct)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: inline creation of settings here instead

config.NewRateLimitConfigImpl(allConfigs, manager)
}

func main() {
Expand Down
16 changes: 9 additions & 7 deletions src/limiter/base_limiter.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
"github.com/envoyproxy/ratelimit/src/assert"
"github.com/envoyproxy/ratelimit/src/config"
"github.com/envoyproxy/ratelimit/src/stats"
"github.com/envoyproxy/ratelimit/src/utils"
logger "github.com/sirupsen/logrus"
"math"
Expand All @@ -18,6 +19,7 @@ type BaseRateLimiter struct {
cacheKeyGenerator CacheKeyGenerator
localCache *freecache.Cache
nearLimitRatio float32
Manager stats.Manager
}

type LimitInfo struct {
Expand Down Expand Up @@ -89,7 +91,7 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *
responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT,
limitInfo.limit.Limit, 0)

checkOverLimitThreshold(limitInfo, hitsAddend)
this.checkOverLimitThreshold(limitInfo, hitsAddend)

if this.localCache != nil {
// Set the TTL of the local_cache to be the entire duration.
Expand All @@ -109,25 +111,26 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *
limitInfo.limit.Limit, limitInfo.overLimitThreshold-limitInfo.limitAfterIncrease)

// The limit is OK but we additionally want to know if we are near the limit.
checkNearLimitThreshold(limitInfo, hitsAddend)
this.checkNearLimitThreshold(limitInfo, hitsAddend)
limitInfo.limit.Stats.WithinLimit.Add(uint64(hitsAddend))
}
return responseDescriptorStatus
}

func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64,
localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) *BaseRateLimiter {
localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, manager stats.Manager) *BaseRateLimiter {
return &BaseRateLimiter{
timeSource: timeSource,
JitterRand: jitterRand,
ExpirationJitterMaxSeconds: expirationJitterMaxSeconds,
cacheKeyGenerator: NewCacheKeyGenerator(cacheKeyPrefix),
localCache: localCache,
nearLimitRatio: nearLimitRatio,
Manager: manager,
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

again this field name is quite generic, unless looking at the type, it would be hard to figure out where does this manager belong to

}
}

func checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
func (this *BaseRateLimiter) checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
// Increase over limit statistics. Because we support += behavior for increasing the limit, we need to
// assess if the entire hitsAddend were over the limit. That is, if the limit's value before adding the
// N hits was over the limit, then all the N hits were over limit.
Expand All @@ -140,12 +143,11 @@ func checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {

// If the limit before increase was below the over limit value, then some of the hits were
// in the near limit range.
limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.overLimitThreshold -
utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)))
limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.overLimitThreshold - utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)))
}
}

func checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
func (this *BaseRateLimiter) checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
if limitInfo.limitAfterIncrease > limitInfo.nearLimitThreshold {
// Here we also need to assess which portion of the hitsAddend were in the near limit range.
// If all the hits were over the nearLimitThreshold, then all hits are near limit. Otherwise,
Expand Down
11 changes: 6 additions & 5 deletions src/memcached/cache_impl.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,13 @@ package memcached

import (
"context"
"github.com/envoyproxy/ratelimit/src/stats"
"math/rand"
"strconv"
"sync"

"github.com/coocood/freecache"
stats "github.com/lyft/gostats"
gostats "github.com/lyft/gostats"

"github.com/bradfitz/gomemcache/memcache"

Expand Down Expand Up @@ -174,27 +175,27 @@ func (this *rateLimitMemcacheImpl) Flush() {
}

func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand,
expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache {
expirationJitterMaxSeconds int64, localCache *freecache.Cache, manager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache {
return &rateLimitMemcacheImpl{
client: client,
timeSource: timeSource,
jitterRand: jitterRand,
expirationJitterMaxSeconds: expirationJitterMaxSeconds,
localCache: localCache,
nearLimitRatio: nearLimitRatio,
baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix),
baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, manager),
}
}

func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand,
localCache *freecache.Cache, scope stats.Scope) limiter.RateLimitCache {
localCache *freecache.Cache, scope gostats.Scope, manager stats.Manager) limiter.RateLimitCache {
return NewRateLimitCacheImpl(
CollectStats(memcache.New(s.MemcacheHostPort...), scope.Scope("memcache")),
timeSource,
jitterRand,
s.ExpirationJitterMaxSeconds,
localCache,
scope,
manager,
s.NearLimitRatio,
s.CacheKeyPrefix,
)
Expand Down
4 changes: 3 additions & 1 deletion src/redis/cache_impl.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package redis

import (
"github.com/envoyproxy/ratelimit/src/stats"
"math/rand"

"github.com/coocood/freecache"
Expand All @@ -10,7 +11,7 @@ import (
"github.com/envoyproxy/ratelimit/src/utils"
)

func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) limiter.RateLimitCache {
func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, manager stats.Manager) limiter.RateLimitCache {
var perSecondPool Client
if s.RedisPerSecond {
perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth,
Expand All @@ -29,5 +30,6 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca
localCache,
s.NearLimitRatio,
s.CacheKeyPrefix,
manager,
)
}
5 changes: 3 additions & 2 deletions src/redis/fixed_cache_impl.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package redis

import (
"github.com/envoyproxy/ratelimit/src/stats"
"math/rand"

"github.com/coocood/freecache"
Expand Down Expand Up @@ -107,10 +108,10 @@ func (this *fixedRateLimitCacheImpl) DoLimit(
func (this *fixedRateLimitCacheImpl) Flush() {}

func NewFixedRateLimitCacheImpl(client Client, perSecondClient Client, timeSource utils.TimeSource,
jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache {
jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, manager stats.Manager) limiter.RateLimitCache {
return &fixedRateLimitCacheImpl{
client: client,
perSecondClient: perSecondClient,
baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix),
baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, manager),
}
}
Loading