Metric refactor #242

Merged (20 commits) on Jun 2, 2021
src/config/config.go: 4 additions, 13 deletions
@@ -3,7 +3,7 @@ package config
import (
pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
stats "github.com/lyft/gostats"
"github.com/envoyproxy/ratelimit/src/stats"
"golang.org/x/net/context"
)

@@ -14,19 +14,10 @@ func (e RateLimitConfigError) Error() string {
return string(e)
}

// Stats for an individual rate limit config entry.
type RateLimitStats struct {
TotalHits stats.Counter
OverLimit stats.Counter
NearLimit stats.Counter
OverLimitWithLocalCache stats.Counter
WithinLimit stats.Counter
}

// Wrapper for an individual rate limit config entry which includes the defined limit and stats.
type RateLimit struct {
FullKey string
Stats RateLimitStats
Stats stats.RateLimitStats
Limit *pb.RateLimitResponse_RateLimit
}

@@ -53,8 +44,8 @@ type RateLimitConfigToLoad struct {
type RateLimitConfigLoader interface {
// Load a new configuration from a list of YAML files.
// @param configs supplies a list of full YAML files in string form.
// @param statsScope supplies the stats scope to use for limit stats during runtime.
// @param statsManager supplies the stats manager used to initialize limit stats during runtime.
// @return a new configuration.
// @throws RateLimitConfigError if the configuration could not be created.
Load(configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig
Load(configs []RateLimitConfigToLoad, statsManager stats.Manager) RateLimitConfig
}
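The RateLimitStats struct and helpers removed from this file move behind the new stats package. That package is not shown in this excerpt, so the following is only a minimal sketch of its surface as implied by the call sites in this diff (stats.RateLimitStats, GetKey, Manager.NewStats); the real definitions in src/stats may differ in detail.

// Sketch only: inferred from call sites in this PR, not copied from src/stats.
package stats

import gostats "github.com/lyft/gostats"

// RateLimitStats carries the same counters as the struct deleted from
// src/config/config.go, plus the fully resolved key they were created under.
type RateLimitStats struct {
	Key                     string
	TotalHits               gostats.Counter
	OverLimit               gostats.Counter
	NearLimit               gostats.Counter
	OverLimitWithLocalCache gostats.Counter
	WithinLimit             gostats.Counter
}

// GetKey returns the key the stats were created for; config.NewRateLimit uses
// it as the RateLimit's FullKey.
func (s RateLimitStats) GetKey() string {
	return s.Key
}

// Manager centralizes stat creation so config loading and the limiters no
// longer thread a raw gostats.Scope around.
type Manager interface {
	NewStats(key string) RateLimitStats
}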
src/config/config_impl.go: 31 additions, 51 deletions
@@ -6,7 +6,7 @@ import (

pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
stats "github.com/lyft/gostats"
"github.com/envoyproxy/ratelimit/src/stats"
logger "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"gopkg.in/yaml.v2"
@@ -39,8 +39,8 @@ type rateLimitDomain struct {
}

type rateLimitConfigImpl struct {
domains map[string]*rateLimitDomain
statsScope stats.Scope
domains map[string]*rateLimitDomain
statsManager stats.Manager
}

var validKeys = map[string]bool{
@@ -53,30 +53,15 @@ var validKeys = map[string]bool{
"requests_per_unit": true,
}

// Create new rate limit stats for a config entry.
// @param statsScope supplies the owning scope.
// @param key supplies the fully resolved key name of the entry.
// @return new stats.
func newRateLimitStats(statsScope stats.Scope, key string) RateLimitStats {
ret := RateLimitStats{}
ret.TotalHits = statsScope.NewCounter(key + ".total_hits")
ret.OverLimit = statsScope.NewCounter(key + ".over_limit")
ret.NearLimit = statsScope.NewCounter(key + ".near_limit")
ret.OverLimitWithLocalCache = statsScope.NewCounter(key + ".over_limit_with_local_cache")
ret.WithinLimit = statsScope.NewCounter(key + ".within_limit")
return ret
}

// Create a new rate limit config entry.
// @param requestsPerUnit supplies the requests per unit of time for the entry.
// @param unit supplies the unit of time for the entry.
// @param key supplies the fully resolved key name of the entry.
// @param scope supplies the owning scope.
// @param rlStats supplies the stats structure associated with the RateLimit
// @return the new config entry.
func NewRateLimit(
requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, key string, scope stats.Scope) *RateLimit {
requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats) *RateLimit {

return &RateLimit{FullKey: key, Stats: newRateLimitStats(scope, key), Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}}
return &RateLimit{FullKey: rlStats.GetKey(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}}
}

// Dump an individual descriptor for debugging purposes.
@@ -104,10 +89,8 @@ func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimit
// @param config supplies the config file that owns the descriptor.
// @param parentKey supplies the fully resolved key name that owns this config level.
// @param descriptors supplies the YAML descriptors to load.
// @param statsScope supplies the owning scope.
func (this *rateLimitDescriptor) loadDescriptors(
config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor,
statsScope stats.Scope) {
// @param statsManager supplies the stats manager that owns the stats scope.
func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor, statsManager stats.Manager) {

for _, descriptorConfig := range descriptors {
if descriptorConfig.Key == "" {
@@ -138,8 +121,7 @@ func (this *rateLimitDescriptor) loadDescriptors(
}

rateLimit = NewRateLimit(
descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), newParentKey,
statsScope)
descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), statsManager.NewStats(newParentKey))
rateLimitDebugString = fmt.Sprintf(
" ratelimit={requests_per_unit=%d, unit=%s}", rateLimit.Limit.RequestsPerUnit,
rateLimit.Limit.Unit.String())
@@ -148,8 +130,7 @@ func (this *rateLimitDescriptor) loadDescriptors(
logger.Debugf(
"loading descriptor: key=%s%s", newParentKey, rateLimitDebugString)
newDescriptor := &rateLimitDescriptor{map[string]*rateLimitDescriptor{}, rateLimit}
newDescriptor.loadDescriptors(
config, newParentKey+".", descriptorConfig.Descriptors, statsScope)
newDescriptor.loadDescriptors(config, newParentKey+".", descriptorConfig.Descriptors, statsManager)
this.descriptors[finalKey] = newDescriptor
}
}
@@ -229,24 +210,10 @@ func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad) {

logger.Debugf("loading domain: %s", root.Domain)
newDomain := &rateLimitDomain{rateLimitDescriptor{map[string]*rateLimitDescriptor{}, nil}}
newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.statsScope)
newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.statsManager)
this.domains[root.Domain] = newDomain
}

func (this *rateLimitConfigImpl) descriptorToKey(descriptor *pb_struct.RateLimitDescriptor) string {
rateLimitKey := ""
for _, entry := range descriptor.Entries {
if rateLimitKey != "" {
rateLimitKey += "."
}
rateLimitKey += entry.Key
if entry.Value != "" {
rateLimitKey += "_" + entry.Value
}
}
return rateLimitKey
}

func (this *rateLimitConfigImpl) Dump() string {
ret := ""
for _, domain := range this.domains {
@@ -268,13 +235,12 @@ func (this *rateLimitConfigImpl) GetLimit(
}

if descriptor.GetLimit() != nil {
rateLimitKey := domain + "." + this.descriptorToKey(descriptor)
rateLimitKey := descriptorKey(domain, descriptor)
rateLimitOverrideUnit := pb.RateLimitResponse_RateLimit_Unit(descriptor.GetLimit().GetUnit())
rateLimit = NewRateLimit(
descriptor.GetLimit().GetRequestsPerUnit(),
rateLimitOverrideUnit,
rateLimitKey,
this.statsScope)
this.statsManager.NewStats(rateLimitKey))
return rateLimit
}

@@ -311,14 +277,28 @@ func (this *rateLimitConfigImpl) GetLimit(
return rateLimit
}

func descriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) string {
rateLimitKey := ""
for _, entry := range descriptor.Entries {
if rateLimitKey != "" {
rateLimitKey += "."
}
rateLimitKey += entry.Key
if entry.Value != "" {
rateLimitKey += "_" + entry.Value
}
}
return domain + "." + rateLimitKey
}

// Create rate limit config from a list of input YAML files.
// @param configs specifies a list of YAML files to load.
// @param stats supplies the stats scope to use for limit stats during runtime.
// @return a new config.
func NewRateLimitConfigImpl(
configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig {
configs []RateLimitConfigToLoad, statsManager stats.Manager) RateLimitConfig {

ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsScope}
ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsManager}
for _, config := range configs {
ret.loadConfig(config)
}
@@ -329,9 +309,9 @@ func NewRateLimitConfigImpl(
type rateLimitConfigLoaderImpl struct{}

func (this *rateLimitConfigLoaderImpl) Load(
configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig {
configs []RateLimitConfigToLoad, statsManager stats.Manager) RateLimitConfig {

return NewRateLimitConfigImpl(configs, statsScope)
return NewRateLimitConfigImpl(configs, statsManager)
}

// @return a new default config loader implementation.
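As a side note on the new descriptorKey helper above, the snippet below reproduces its logic standalone and shows the stat key it yields for a made-up descriptor; the domain and entry values are illustrative only, and the counters hung off that key are presumably the same ones the deleted newRateLimitStats helper created (.total_hits, .over_limit, .near_limit, .over_limit_with_local_cache, .within_limit).

package main

import (
	"fmt"

	pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
)

// Same logic as the descriptorKey helper above, copied so the example runs on its own.
func descriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) string {
	rateLimitKey := ""
	for _, entry := range descriptor.Entries {
		if rateLimitKey != "" {
			rateLimitKey += "."
		}
		rateLimitKey += entry.Key
		if entry.Value != "" {
			rateLimitKey += "_" + entry.Value
		}
	}
	return domain + "." + rateLimitKey
}

func main() {
	d := &pb_struct.RateLimitDescriptor{
		Entries: []*pb_struct.RateLimitDescriptor_Entry{
			{Key: "database", Value: "users"},
			{Key: "operation"},
		},
	}
	// Prints "mongo_cps.database_users.operation".
	fmt.Println(descriptorKey("mongo_cps", d))
}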
src/config_check_cmd/main.go: 5 additions, 4 deletions
@@ -3,12 +3,14 @@ package main
import (
"flag"
"fmt"
"github.com/envoyproxy/ratelimit/src/settings"
"github.com/envoyproxy/ratelimit/src/stats"
"io/ioutil"
"os"
"path/filepath"

"github.com/envoyproxy/ratelimit/src/config"
"github.com/lyft/gostats"
gostats "github.com/lyft/gostats"
)

func loadConfigs(allConfigs []config.RateLimitConfigToLoad) {
@@ -19,9 +21,8 @@ func loadConfigs(allConfigs []config.RateLimitConfigToLoad) {
os.Exit(1)
}
}()

dummyStats := stats.NewStore(stats.NewNullSink(), false)
config.NewRateLimitConfigImpl(allConfigs, dummyStats)
statsManager := stats.NewStatManager(gostats.NewStore(gostats.NewNullSink(), false), settings.NewSettings())
config.NewRateLimitConfigImpl(allConfigs, statsManager)
}

func main() {
src/limiter/base_limiter.go: 9 additions, 7 deletions
@@ -5,6 +5,7 @@ import (
pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
"github.com/envoyproxy/ratelimit/src/assert"
"github.com/envoyproxy/ratelimit/src/config"
"github.com/envoyproxy/ratelimit/src/stats"
"github.com/envoyproxy/ratelimit/src/utils"
logger "github.com/sirupsen/logrus"
"math"
@@ -18,6 +19,7 @@ type BaseRateLimiter struct {
cacheKeyGenerator CacheKeyGenerator
localCache *freecache.Cache
nearLimitRatio float32
StatsManager stats.Manager
}

type LimitInfo struct {
@@ -89,7 +91,7 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *
responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT,
limitInfo.limit.Limit, 0)

checkOverLimitThreshold(limitInfo, hitsAddend)
this.checkOverLimitThreshold(limitInfo, hitsAddend)

if this.localCache != nil {
// Set the TTL of the local_cache to be the entire duration.
@@ -109,25 +111,26 @@
limitInfo.limit.Limit, limitInfo.overLimitThreshold-limitInfo.limitAfterIncrease)

// The limit is OK but we additionally want to know if we are near the limit.
checkNearLimitThreshold(limitInfo, hitsAddend)
this.checkNearLimitThreshold(limitInfo, hitsAddend)
limitInfo.limit.Stats.WithinLimit.Add(uint64(hitsAddend))
}
return responseDescriptorStatus
}

func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64,
localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) *BaseRateLimiter {
localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager) *BaseRateLimiter {
return &BaseRateLimiter{
timeSource: timeSource,
JitterRand: jitterRand,
ExpirationJitterMaxSeconds: expirationJitterMaxSeconds,
cacheKeyGenerator: NewCacheKeyGenerator(cacheKeyPrefix),
localCache: localCache,
nearLimitRatio: nearLimitRatio,
StatsManager: statsManager,
}
}

func checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
func (this *BaseRateLimiter) checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
// Increase over limit statistics. Because we support += behavior for increasing the limit, we need to
// assess if the entire hitsAddend were over the limit. That is, if the limit's value before adding the
// N hits was over the limit, then all the N hits were over limit.
@@ -140,12 +143,11 @@ func checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {

// If the limit before increase was below the over limit value, then some of the hits were
// in the near limit range.
limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.overLimitThreshold -
utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)))
limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.overLimitThreshold - utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)))
}
}

func checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
func (this *BaseRateLimiter) checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
if limitInfo.limitAfterIncrease > limitInfo.nearLimitThreshold {
// Here we also need to assess which portion of the hitsAddend were in the near limit range.
// If all the hits were over the nearLimitThreshold, then all hits are near limit. Otherwise,
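The over-limit and near-limit accounting these functions (now methods on BaseRateLimiter) implement can be traced with a small standalone calculation; the numbers below are illustrative and not taken from the PR.

package main

import "fmt"

func maxU32(a, b uint32) uint32 {
	if a > b {
		return a
	}
	return b
}

func main() {
	// Illustrative values: a limit of 100 per unit, near_limit_ratio of 0.8,
	// a counter at 75 before this request, and a hits addend of 30.
	var overLimitThreshold uint32 = 100
	var nearLimitThreshold uint32 = 80 // 100 * 0.8
	var limitBeforeIncrease uint32 = 75
	var hitsAddend uint32 = 30
	limitAfterIncrease := limitBeforeIncrease + hitsAddend // 105

	if limitBeforeIncrease >= overLimitThreshold {
		// Already over the limit before this request: every hit counts as over limit.
		fmt.Println("over_limit +=", hitsAddend)
	} else {
		// 5 of the 30 hits landed past the limit ...
		fmt.Println("over_limit +=", limitAfterIncrease-overLimitThreshold)
		// ... and 20 fell between the near-limit threshold and the limit.
		fmt.Println("near_limit +=", overLimitThreshold-maxU32(nearLimitThreshold, limitBeforeIncrease))
	}
}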
src/memcached/cache_impl.go: 6 additions, 5 deletions
@@ -17,13 +17,14 @@ package memcached

import (
"context"
"github.com/envoyproxy/ratelimit/src/stats"
"math/rand"
"strconv"
"sync"
"time"

"github.com/coocood/freecache"
stats "github.com/lyft/gostats"
gostats "github.com/lyft/gostats"

"github.com/bradfitz/gomemcache/memcache"

@@ -274,27 +275,27 @@ func runAsync(task func()) {
}

func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand,
expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache {
expirationJitterMaxSeconds int64, localCache *freecache.Cache, statsManager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache {
return &rateLimitMemcacheImpl{
client: client,
timeSource: timeSource,
jitterRand: jitterRand,
expirationJitterMaxSeconds: expirationJitterMaxSeconds,
localCache: localCache,
nearLimitRatio: nearLimitRatio,
baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix),
baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, statsManager),
}
}

func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand,
localCache *freecache.Cache, scope stats.Scope) limiter.RateLimitCache {
localCache *freecache.Cache, scope gostats.Scope, statsManager stats.Manager) limiter.RateLimitCache {
return NewRateLimitCacheImpl(
CollectStats(newMemcacheFromSettings(s), scope.Scope("memcache")),
timeSource,
jitterRand,
s.ExpirationJitterMaxSeconds,
localCache,
scope,
statsManager,
s.NearLimitRatio,
s.CacheKeyPrefix,
)
src/redis/cache_impl.go: 3 additions, 1 deletion
@@ -1,6 +1,7 @@
package redis

import (
"github.com/envoyproxy/ratelimit/src/stats"
"math/rand"

"github.com/coocood/freecache"
@@ -10,7 +11,7 @@ import (
"github.com/envoyproxy/ratelimit/src/utils"
)

func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) limiter.RateLimitCache {
func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, statsManager stats.Manager) limiter.RateLimitCache {
var perSecondPool Client
if s.RedisPerSecond {
perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth,
@@ -29,5 +30,6 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca
localCache,
s.NearLimitRatio,
s.CacheKeyPrefix,
statsManager,
)
}
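Callers of this constructor now have to supply a stats.Manager as well. The helper below is a hypothetical wiring sketch, not code from this PR: it assumes the runner already has the settings, local cache, server, time source, and jitter source, and only shows where the new statsManager argument goes (gostats.NewDefaultStore is used purely for illustration).

package wiring // hypothetical package, for illustration only

import (
	"math/rand"

	"github.com/coocood/freecache"
	"github.com/envoyproxy/ratelimit/src/limiter"
	"github.com/envoyproxy/ratelimit/src/redis"
	"github.com/envoyproxy/ratelimit/src/server"
	"github.com/envoyproxy/ratelimit/src/settings"
	"github.com/envoyproxy/ratelimit/src/stats"
	"github.com/envoyproxy/ratelimit/src/utils"
	gostats "github.com/lyft/gostats"
)

// newRedisCache only demonstrates the extra statsManager parameter being
// threaded through; every other argument is whatever the caller already has.
func newRedisCache(s settings.Settings, localCache *freecache.Cache, srv server.Server,
	timeSource utils.TimeSource, jitterRand *rand.Rand) limiter.RateLimitCache {

	statsManager := stats.NewStatManager(gostats.NewDefaultStore(), s)
	return redis.NewRateLimiterCacheImplFromSettings(
		s, localCache, srv, timeSource, jitterRand, s.ExpirationJitterMaxSeconds, statsManager)
}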