Adds Detailed Metrics feature #237

Closed; wants to merge 1 commit.
15 changes: 3 additions & 12 deletions src/config/config.go
@@ -3,7 +3,7 @@ package config
 import (
     pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
     pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
-    stats "github.com/lyft/gostats"
+    "github.com/envoyproxy/ratelimit/src/stats"
     "golang.org/x/net/context"
 )

@@ -14,19 +14,10 @@ func (e RateLimitConfigError) Error() string {
     return string(e)
 }
 
-// Stats for an individual rate limit config entry.
-type RateLimitStats struct {
-    TotalHits               stats.Counter
-    OverLimit               stats.Counter
-    NearLimit               stats.Counter
-    OverLimitWithLocalCache stats.Counter
-    WithinLimit             stats.Counter
-}
-
 // Wrapper for an individual rate limit config entry which includes the defined limit and stats.
 type RateLimit struct {
     FullKey string
-    Stats   RateLimitStats
+    Stats   stats.RateLimitStats
     Limit   *pb.RateLimitResponse_RateLimit
 }

@@ -56,5 +47,5 @@ type RateLimitConfigLoader interface {
     // @param statsScope supplies the stats scope to use for limit stats during runtime.
     // @return a new configuration.
     // @throws RateLimitConfigError if the configuration could not be created.
-    Load(configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig
+    Load(configs []RateLimitConfigToLoad, manager stats.Manager) RateLimitConfig
 }
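
Note on the new stats package: src/stats itself is not part of the hunks shown in this PR view, so its shape has to be inferred from the call sites (rlStats.String() above, manager.NewStats(key) in config_impl.go, and the Manager.Add* calls in base_limiter.go below). A minimal sketch of a surface that would satisfy those call sites; the field names and the interface-versus-struct choice are assumptions, not the PR's code:

// Sketch only: inferred from call sites in this diff, not taken from src/stats.
package stats

import gostats "github.com/lyft/gostats"

// RateLimitStats presumably carries the per-entry counters that previously
// lived in config.RateLimitStats, plus the fully resolved key: the diff uses
// rlStats.String() as the RateLimit's FullKey.
type RateLimitStats struct {
    Key                     string
    TotalHits               gostats.Counter
    OverLimit               gostats.Counter
    NearLimit               gostats.Counter
    OverLimitWithLocalCache gostats.Counter
    WithinLimit             gostats.Counter
}

// String returns the fully resolved key for the config entry.
func (s RateLimitStats) String() string { return s.Key }

// Manager centralizes counter creation and incrementing. Every Add* call also
// receives a descriptorKey, which is what would let a detailed-metrics mode
// emit per-descriptor counters alongside the per-config-entry ones.
type Manager interface {
    NewStats(key string) RateLimitStats
    AddTotalHits(n uint64, rlStats RateLimitStats, descriptorKey string)
    AddOverLimit(n uint64, rlStats RateLimitStats, descriptorKey string)
    AddNearLimit(n uint64, rlStats RateLimitStats, descriptorKey string)
    AddOverLimitWithLocalCache(n uint64, rlStats RateLimitStats, descriptorKey string)
    AddWithinLimit(n uint64, rlStats RateLimitStats, descriptorKey string)
}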
68 changes: 17 additions & 51 deletions src/config/config_impl.go
@@ -6,7 +6,7 @@ import (
 
     pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
     pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
-    stats "github.com/lyft/gostats"
+    "github.com/envoyproxy/ratelimit/src/stats"
     logger "github.com/sirupsen/logrus"
     "golang.org/x/net/context"
     "gopkg.in/yaml.v2"
@@ -39,8 +39,8 @@ type rateLimitDomain struct {
 }
 
 type rateLimitConfigImpl struct {
-    domains    map[string]*rateLimitDomain
-    statsScope stats.Scope
+    domains map[string]*rateLimitDomain
+    manager stats.Manager
 }
 
 var validKeys = map[string]bool{
@@ -53,30 +53,15 @@ var validKeys = map[string]bool{
     "requests_per_unit": true,
 }
 
-// Create new rate limit stats for a config entry.
-// @param statsScope supplies the owning scope.
-// @param key supplies the fully resolved key name of the entry.
-// @return new stats.
-func newRateLimitStats(statsScope stats.Scope, key string) RateLimitStats {
-    ret := RateLimitStats{}
-    ret.TotalHits = statsScope.NewCounter(key + ".total_hits")
-    ret.OverLimit = statsScope.NewCounter(key + ".over_limit")
-    ret.NearLimit = statsScope.NewCounter(key + ".near_limit")
-    ret.OverLimitWithLocalCache = statsScope.NewCounter(key + ".over_limit_with_local_cache")
-    ret.WithinLimit = statsScope.NewCounter(key + ".within_limit")
-    return ret
-}
-
 // Create a new rate limit config entry.
 // @param requestsPerUnit supplies the requests per unit of time for the entry.
 // @param unit supplies the unit of time for the entry.
-// @param key supplies the fully resolved key name of the entry.
-// @param scope supplies the owning scope.
+// @param rlStats supplies the stats structure associated with the RateLimit
 // @return the new config entry.
 func NewRateLimit(
-    requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, key string, scope stats.Scope) *RateLimit {
+    requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats) *RateLimit {
 
-    return &RateLimit{FullKey: key, Stats: newRateLimitStats(scope, key), Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}}
+    return &RateLimit{FullKey: rlStats.String(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}}
 }
 
 // Dump an individual descriptor for debugging purposes.
@@ -104,10 +89,8 @@ func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimit
 // @param config supplies the config file that owns the descriptor.
 // @param parentKey supplies the fully resolved key name that owns this config level.
 // @param descriptors supplies the YAML descriptors to load.
-// @param statsScope supplies the owning scope.
-func (this *rateLimitDescriptor) loadDescriptors(
-    config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor,
-    statsScope stats.Scope) {
+// @param manager that owns the stats.Scope.
+func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor, manager stats.Manager) {
 
     for _, descriptorConfig := range descriptors {
         if descriptorConfig.Key == "" {
@@ -138,8 +121,7 @@ func (this *rateLimitDescriptor) loadDescriptors(
         }
 
         rateLimit = NewRateLimit(
-            descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), newParentKey,
-            statsScope)
+            descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), manager.NewStats(newParentKey))
         rateLimitDebugString = fmt.Sprintf(
             " ratelimit={requests_per_unit=%d, unit=%s}", rateLimit.Limit.RequestsPerUnit,
             rateLimit.Limit.Unit.String())
@@ -148,8 +130,7 @@ func (this *rateLimitDescriptor) loadDescriptors(
         logger.Debugf(
             "loading descriptor: key=%s%s", newParentKey, rateLimitDebugString)
         newDescriptor := &rateLimitDescriptor{map[string]*rateLimitDescriptor{}, rateLimit}
-        newDescriptor.loadDescriptors(
-            config, newParentKey+".", descriptorConfig.Descriptors, statsScope)
+        newDescriptor.loadDescriptors(config, newParentKey+".", descriptorConfig.Descriptors, manager)
         this.descriptors[finalKey] = newDescriptor
     }
 }
@@ -229,24 +210,10 @@ func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad) {
 
     logger.Debugf("loading domain: %s", root.Domain)
     newDomain := &rateLimitDomain{rateLimitDescriptor{map[string]*rateLimitDescriptor{}, nil}}
-    newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.statsScope)
+    newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.manager)
     this.domains[root.Domain] = newDomain
 }
 
-func (this *rateLimitConfigImpl) descriptorToKey(descriptor *pb_struct.RateLimitDescriptor) string {
-    rateLimitKey := ""
-    for _, entry := range descriptor.Entries {
-        if rateLimitKey != "" {
-            rateLimitKey += "."
-        }
-        rateLimitKey += entry.Key
-        if entry.Value != "" {
-            rateLimitKey += "_" + entry.Value
-        }
-    }
-    return rateLimitKey
-}
-
 func (this *rateLimitConfigImpl) Dump() string {
     ret := ""
     for _, domain := range this.domains {
@@ -268,13 +235,12 @@ func (this *rateLimitConfigImpl) GetLimit(
     }
 
     if descriptor.GetLimit() != nil {
-        rateLimitKey := domain + "." + this.descriptorToKey(descriptor)
+        rateLimitKey := stats.DescriptorKey(domain, descriptor)
         rateLimitOverrideUnit := pb.RateLimitResponse_RateLimit_Unit(descriptor.GetLimit().GetUnit())
         rateLimit = NewRateLimit(
            descriptor.GetLimit().GetRequestsPerUnit(),
            rateLimitOverrideUnit,
-           rateLimitKey,
-           this.statsScope)
+           this.manager.NewStats(rateLimitKey))
         return rateLimit
     }
 
@@ -316,9 +282,9 @@ func (this *rateLimitConfigImpl) GetLimit(
 // @param stats supplies the stats scope to use for limit stats during runtime.
 // @return a new config.
 func NewRateLimitConfigImpl(
-    configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig {
+    configs []RateLimitConfigToLoad, manager stats.Manager) RateLimitConfig {
 
-    ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsScope}
+    ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, manager}
     for _, config := range configs {
         ret.loadConfig(config)
     }
@@ -329,9 +295,9 @@ func NewRateLimitConfigImpl(
 type rateLimitConfigLoaderImpl struct{}
 
 func (this *rateLimitConfigLoaderImpl) Load(
-    configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig {
+    configs []RateLimitConfigToLoad, manager stats.Manager) RateLimitConfig {
 
-    return NewRateLimitConfigImpl(configs, statsScope)
+    return NewRateLimitConfigImpl(configs, manager)
 }
 
 // @return a new default config loader implementation.
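
Note: the deleted descriptorToKey above is replaced by stats.DescriptorKey(domain, descriptor), which is also not shown in this diff. To keep emitted stat names stable it would presumably reproduce the old per-entry logic with the domain prefix (previously prepended in GetLimit) folded in. A sketch under that assumption, not the PR's implementation:

package stats

import (
    pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
)

// DescriptorKey flattens a domain plus request descriptor into a dotted key.
// For domain "d" and entries [{k1, v1}, {k2, ""}] it yields "d.k1_v1.k2",
// matching the deleted descriptorToKey plus the "domain." prefix.
func DescriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) string {
    rateLimitKey := ""
    for _, entry := range descriptor.Entries {
        if rateLimitKey != "" {
            rateLimitKey += "."
        }
        rateLimitKey += entry.Key
        if entry.Value != "" {
            rateLimitKey += "_" + entry.Value
        }
    }
    return domain + "." + rateLimitKey
}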
10 changes: 6 additions & 4 deletions src/config_check_cmd/main.go
@@ -3,12 +3,14 @@ package main
 import (
     "flag"
     "fmt"
+    "github.com/envoyproxy/ratelimit/src/settings"
+    "github.com/envoyproxy/ratelimit/src/stats"
     "io/ioutil"
     "os"
     "path/filepath"
 
     "github.com/envoyproxy/ratelimit/src/config"
-    "github.com/lyft/gostats"
+    gostats "github.com/lyft/gostats"
 )
 
 func loadConfigs(allConfigs []config.RateLimitConfigToLoad) {
@@ -19,9 +21,9 @@ func loadConfigs(allConfigs []config.RateLimitConfigToLoad) {
             os.Exit(1)
         }
     }()
 
-    dummyStats := stats.NewStore(stats.NewNullSink(), false)
-    config.NewRateLimitConfigImpl(allConfigs, dummyStats)
+    settingStruct := settings.NewSettings()
+    manager := stats.NewStatManager(gostats.NewStore(gostats.NewNullSink(), false), settingStruct)
+    config.NewRateLimitConfigImpl(allConfigs, manager)
 }
 
 func main() {
48 changes: 27 additions & 21 deletions src/limiter/base_limiter.go
@@ -5,6 +5,7 @@ import (
     pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
     "github.com/envoyproxy/ratelimit/src/assert"
     "github.com/envoyproxy/ratelimit/src/config"
+    "github.com/envoyproxy/ratelimit/src/stats"
     "github.com/envoyproxy/ratelimit/src/utils"
     logger "github.com/sirupsen/logrus"
     "math"
@@ -18,6 +19,7 @@ type BaseRateLimiter struct {
     cacheKeyGenerator CacheKeyGenerator
     localCache        *freecache.Cache
     nearLimitRatio    float32
+    Manager           stats.Manager
 }
 
 type LimitInfo struct {
@@ -47,7 +49,7 @@ func (this *BaseRateLimiter) GenerateCacheKeys(request *pb.RateLimitRequest,
         cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey(request.Domain, request.Descriptors[i], limits[i], now)
         // Increase statistics for limits hit by their respective requests.
         if limits[i] != nil {
-            limits[i].Stats.TotalHits.Add(uint64(hitsAddend))
+            this.Manager.AddTotalHits(uint64(hitsAddend), limits[i].Stats, stats.DescriptorKey(request.Domain, request.Descriptors[i]))
         }
     }
     return cacheKeys
@@ -67,15 +69,15 @@ func (this *BaseRateLimiter) IsOverLimitWithLocalCache(key string) bool {
 
 // Generates response descriptor status based on cache key, over the limit with local cache, over the limit and
 // near the limit thresholds. Thresholds are checked in order and are mutually exclusive.
-func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *LimitInfo,
-    isOverLimitWithLocalCache bool, hitsAddend uint32) *pb.RateLimitResponse_DescriptorStatus {
-    if key == "" {
+func (this *BaseRateLimiter) GetResponseDescriptorStatus(localCacheKey string, limitInfo *LimitInfo,
+    isOverLimitWithLocalCache bool, hitsAddend uint32, descriptorKey string) *pb.RateLimitResponse_DescriptorStatus {
+    if localCacheKey == "" {
         return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK,
             nil, 0)
     }
     if isOverLimitWithLocalCache {
-        limitInfo.limit.Stats.OverLimit.Add(uint64(hitsAddend))
-        limitInfo.limit.Stats.OverLimitWithLocalCache.Add(uint64(hitsAddend))
+        this.Manager.AddOverLimit(uint64(hitsAddend), limitInfo.limit.Stats, descriptorKey)
+        this.Manager.AddOverLimitWithLocalCache(uint64(hitsAddend), limitInfo.limit.Stats, descriptorKey)
         return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT,
             limitInfo.limit.Limit, 0)
     }
@@ -84,12 +86,12 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *
     // The nearLimitThreshold is the number of requests that can be made before hitting the nearLimitRatio.
     // We need to know it in both the OK and OVER_LIMIT scenarios.
     limitInfo.nearLimitThreshold = uint32(math.Floor(float64(float32(limitInfo.overLimitThreshold) * this.nearLimitRatio)))
-    logger.Debugf("cache key: %s current: %d", key, limitInfo.limitAfterIncrease)
+    logger.Debugf("cache localCacheKey: %s current: %d", localCacheKey, limitInfo.limitAfterIncrease)
     if limitInfo.limitAfterIncrease > limitInfo.overLimitThreshold {
         responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT,
             limitInfo.limit.Limit, 0)
 
-        checkOverLimitThreshold(limitInfo, hitsAddend)
+        this.checkOverLimitThreshold(limitInfo, hitsAddend, descriptorKey)
 
         if this.localCache != nil {
             // Set the TTL of the local_cache to be the entire duration.
@@ -99,62 +101,66 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *
             // similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache key is mongo_1h, we start
             // to get ratelimited in the 50th minute, the ttl of local_cache will be set as 1 hour(0h50m-1h49m).
             // In the time of 1h1m, since the cache key becomes different (mongo_2h), it won't get ratelimited.
-            err := this.localCache.Set([]byte(key), []byte{}, int(utils.UnitToDivider(limitInfo.limit.Limit.Unit)))
+            err := this.localCache.Set([]byte(localCacheKey), []byte{}, int(utils.UnitToDivider(limitInfo.limit.Limit.Unit)))
             if err != nil {
-                logger.Errorf("Failing to set local cache key: %s", key)
+                logger.Errorf("Failing to set local cache localCacheKey: %s", localCacheKey)
             }
         }
     } else {
         responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK,
             limitInfo.limit.Limit, limitInfo.overLimitThreshold-limitInfo.limitAfterIncrease)
 
         // The limit is OK but we additionally want to know if we are near the limit.
-        checkNearLimitThreshold(limitInfo, hitsAddend)
-        limitInfo.limit.Stats.WithinLimit.Add(uint64(hitsAddend))
+        this.checkNearLimitThreshold(limitInfo, hitsAddend, descriptorKey)
+        this.Manager.AddWithinLimit(uint64(hitsAddend), limitInfo.limit.Stats, descriptorKey)
     }
     return responseDescriptorStatus
 }
 
 func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64,
-    localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) *BaseRateLimiter {
+    localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, manager stats.Manager) *BaseRateLimiter {
     return &BaseRateLimiter{
         timeSource:                 timeSource,
         JitterRand:                 jitterRand,
         ExpirationJitterMaxSeconds: expirationJitterMaxSeconds,
         cacheKeyGenerator:          NewCacheKeyGenerator(cacheKeyPrefix),
         localCache:                 localCache,
         nearLimitRatio:             nearLimitRatio,
+        Manager:                    manager,
     }
 }
 
-func checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
+func (this *BaseRateLimiter) checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32, descriptorKey string) {
     // Increase over limit statistics. Because we support += behavior for increasing the limit, we need to
     // assess if the entire hitsAddend were over the limit. That is, if the limit's value before adding the
     // N hits was over the limit, then all the N hits were over limit.
     // Otherwise, only the difference between the current limit value and the over limit threshold
     // were over limit hits.
     if limitInfo.limitBeforeIncrease >= limitInfo.overLimitThreshold {
-        limitInfo.limit.Stats.OverLimit.Add(uint64(hitsAddend))
+        this.Manager.AddOverLimit(uint64(hitsAddend), limitInfo.limit.Stats, descriptorKey)
     } else {
-        limitInfo.limit.Stats.OverLimit.Add(uint64(limitInfo.limitAfterIncrease - limitInfo.overLimitThreshold))
+        this.Manager.AddOverLimit(uint64(limitInfo.limitAfterIncrease-limitInfo.overLimitThreshold), limitInfo.limit.Stats, descriptorKey)
 
         // If the limit before increase was below the over limit value, then some of the hits were
         // in the near limit range.
-        limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.overLimitThreshold -
-            utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)))
+        this.Manager.AddNearLimit(
            uint64(limitInfo.overLimitThreshold-utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)),
            limitInfo.limit.Stats,
            descriptorKey,
        )
     }
 }
 
-func checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
+func (this *BaseRateLimiter) checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32, descriptorKey string) {
     if limitInfo.limitAfterIncrease > limitInfo.nearLimitThreshold {
         // Here we also need to assess which portion of the hitsAddend were in the near limit range.
         // If all the hits were over the nearLimitThreshold, then all hits are near limit. Otherwise,
         // only the difference between the current limit value and the near limit threshold were near
         // limit hits.
         if limitInfo.limitBeforeIncrease >= limitInfo.nearLimitThreshold {
-            limitInfo.limit.Stats.NearLimit.Add(uint64(hitsAddend))
+            this.Manager.AddNearLimit(uint64(hitsAddend), limitInfo.limit.Stats, descriptorKey)
         } else {
-            limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.limitAfterIncrease - limitInfo.nearLimitThreshold))
+            this.Manager.AddNearLimit(uint64(limitInfo.limitAfterIncrease-limitInfo.nearLimitThreshold), limitInfo.limit.Stats, descriptorKey)
         }
     }
 }
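
Note: the near-limit/over-limit split in checkOverLimitThreshold is easiest to check with concrete numbers. A self-contained sketch of the same arithmetic (values chosen for illustration; maxU32 stands in for utils.Max):

package main

import "fmt"

func maxU32(a, b uint32) uint32 {
    if a > b {
        return a
    }
    return b
}

func main() {
    // Limit of 100 requests per unit, near-limit ratio 0.8.
    overLimitThreshold := uint32(100)
    nearLimitThreshold := uint32(80) // floor(100 * 0.8)

    // A batch of 10 hits arrives while the counter sits at 95.
    limitBeforeIncrease := uint32(95)
    hitsAddend := uint32(10)
    limitAfterIncrease := limitBeforeIncrease + hitsAddend // 105

    // Else-branch of checkOverLimitThreshold: the counter crossed the limit
    // inside this batch, so only the overshoot counts as over limit...
    overLimitHits := limitAfterIncrease - overLimitThreshold // 105 - 100 = 5
    // ...and the hits between max(nearLimitThreshold, limitBeforeIncrease)
    // and the limit itself count as near limit.
    nearLimitHits := overLimitThreshold - maxU32(nearLimitThreshold, limitBeforeIncrease) // 100 - 95 = 5

    fmt.Println(overLimitHits, nearLimitHits) // 5 5: the batch of 10 splits evenly here
}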