Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Explicit unlimited ratelimits #261

Merged
23 changes: 23 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
- [Example 2](#example-2)
- [Example 3](#example-3)
- [Example 4](#example-4)
- [Example 5](#example-5)
- [Loading Configuration](#loading-configuration)
- [Log Format](#log-format)
- [Request Fields](#request-fields)
Expand Down Expand Up @@ -329,6 +330,28 @@ descriptors:
unit: second
```

#### Example 5

We can also define unlimited rate limit descriptors:

```yaml
domain: internal
descriptors:
- key: ldap
rate_limit:
unlimited: true

- key: azure
rate_limit:
unit: minute
requests_per_unit: 100
```

For an unlimited descriptor, the request will not be sent to the underlying cache (Redis/Memcached); instead, it is answered immediately and locally by the ratelimit instance.
This can be useful for collecting statistics, or if one wants to define a descriptor that has no limit but the client wants to distinguish between such a descriptor and one that does not exist.

The return value for unlimited descriptors will be an OK status code with the LimitRemaining field set to the MaxUint32 value.

## Loading Configuration

The Ratelimit service uses a library written by Lyft called [goruntime](https://github.com/lyft/goruntime) to do configuration loading. Goruntime monitors
Expand Down
6 changes: 6 additions & 0 deletions examples/ratelimit/config/example.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -27,3 +27,9 @@ descriptors:
rate_limit:
unit: second
requests_per_unit: 1
- key: bay
rate_limit:
unlimited: true
- key: qux
rate_limit:
unlimited: true
7 changes: 4 additions & 3 deletions src/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,10 @@ func (e RateLimitConfigError) Error() string {

// Wrapper for an individual rate limit config entry which includes the defined limit and stats.
type RateLimit struct {
FullKey string
Stats stats.RateLimitStats
Limit *pb.RateLimitResponse_RateLimit
FullKey string
Stats stats.RateLimitStats
Limit *pb.RateLimitResponse_RateLimit
Unlimited bool
}

// Interface for interacting with a loaded rate limit config.
Expand Down
30 changes: 23 additions & 7 deletions src/config/config_impl.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ import (
type yamlRateLimit struct {
RequestsPerUnit uint32 `yaml:"requests_per_unit"`
Unit string
Unlimited bool `yaml:"unlimited"`
}

type yamlDescriptor struct {
Expand Down Expand Up @@ -51,17 +52,19 @@ var validKeys = map[string]bool{
"rate_limit": true,
"unit": true,
"requests_per_unit": true,
"unlimited": true,
}

// Create a new rate limit config entry.
// @param requestsPerUnit supplies the requests per unit of time for the entry.
// @param unit supplies the unit of time for the entry.
// @param rlStats supplies the stats structure associated with the RateLimit
// @param unlimited supplies whether the rate limit is unlimited
// @return the new config entry.
func NewRateLimit(
requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats) *RateLimit {
requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats, unlimited bool) *RateLimit {

return &RateLimit{FullKey: rlStats.GetKey(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}}
return &RateLimit{FullKey: rlStats.GetKey(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}, Unlimited: unlimited}
}

// Dump an individual descriptor for debugging purposes.
Expand Down Expand Up @@ -112,19 +115,29 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p
var rateLimit *RateLimit = nil
var rateLimitDebugString string = ""
if descriptorConfig.RateLimit != nil {
unlimited := descriptorConfig.RateLimit.Unlimited

value, present :=
pb.RateLimitResponse_RateLimit_Unit_value[strings.ToUpper(descriptorConfig.RateLimit.Unit)]
if !present || value == int32(pb.RateLimitResponse_RateLimit_UNKNOWN) {
validUnit := present && value != int32(pb.RateLimitResponse_RateLimit_UNKNOWN)

if unlimited {
if validUnit {
panic(newRateLimitConfigError(
config,
fmt.Sprintf("should not specify rate limit unit when unlimited")))
}
} else if !validUnit {
panic(newRateLimitConfigError(
config,
fmt.Sprintf("invalid rate limit unit '%s'", descriptorConfig.RateLimit.Unit)))
}

rateLimit = NewRateLimit(
descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), statsManager.NewStats(newParentKey))
descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), statsManager.NewStats(newParentKey), unlimited)
rateLimitDebugString = fmt.Sprintf(
" ratelimit={requests_per_unit=%d, unit=%s}", rateLimit.Limit.RequestsPerUnit,
rateLimit.Limit.Unit.String())
" ratelimit={requests_per_unit=%d, unit=%s, unlimited=%t}", rateLimit.Limit.RequestsPerUnit,
rateLimit.Limit.Unit.String(), rateLimit.Unlimited)
}

logger.Debugf(
Expand Down Expand Up @@ -167,6 +180,8 @@ func validateYamlKeys(config RateLimitConfigToLoad, config_map map[interface{}]i
case string:
// int is a leaf type in ratelimit config. No need to keep validating.
case int:
// bool is a leaf type in ratelimit config. No need to keep validating.
case bool:
// nil case is an incorrectly formed yaml. However, because this function's purpose is to validate
// the yaml's keys we don't panic here.
case nil:
Expand Down Expand Up @@ -240,7 +255,8 @@ func (this *rateLimitConfigImpl) GetLimit(
rateLimit = NewRateLimit(
descriptor.GetLimit().GetRequestsPerUnit(),
rateLimitOverrideUnit,
this.statsManager.NewStats(rateLimitKey))
this.statsManager.NewStats(rateLimitKey),
false)
return rateLimit
}

Expand Down
52 changes: 38 additions & 14 deletions src/service/ratelimit.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package ratelimit
import (
"fmt"
"github.com/envoyproxy/ratelimit/src/stats"
"math"
"strings"
"sync"

Expand Down Expand Up @@ -76,16 +77,13 @@ func checkServiceErr(something bool, msg string) {
}
}

func (this *service) shouldRateLimitWorker(
ctx context.Context, request *pb.RateLimitRequest) *pb.RateLimitResponse {

checkServiceErr(request.Domain != "", "rate limit domain must not be empty")
checkServiceErr(len(request.Descriptors) != 0, "rate limit descriptor list must not be empty")

func (this *service) constructLimitsToCheck(request *pb.RateLimitRequest, ctx context.Context) ([]*config.RateLimit, []bool) {
snappedConfig := this.GetCurrentConfig()
checkServiceErr(snappedConfig != nil, "no rate limit configuration loaded")

limitsToCheck := make([]*config.RateLimit, len(request.Descriptors))
isUnlimited := make([]bool, len(request.Descriptors))

for i, descriptor := range request.Descriptors {
if logger.IsLevelEnabled(logger.DebugLevel) {
var descriptorEntryStrings []string
Expand All @@ -102,14 +100,33 @@ func (this *service) shouldRateLimitWorker(
if limitsToCheck[i] == nil {
logger.Debugf("descriptor does not match any limit, no limits applied")
} else {
logger.Debugf(
"applying limit: %d requests per %s",
limitsToCheck[i].Limit.RequestsPerUnit,
limitsToCheck[i].Limit.Unit.String(),
)
if limitsToCheck[i].Unlimited {
logger.Debugf("descriptor is unlimited, not passing to the cache")
} else {
logger.Debugf(
"applying limit: %d requests per %s",
limitsToCheck[i].Limit.RequestsPerUnit,
limitsToCheck[i].Limit.Unit.String(),
)
}
}
}

if limitsToCheck[i] != nil && limitsToCheck[i].Unlimited {
isUnlimited[i] = true
limitsToCheck[i] = nil
}
}
return limitsToCheck, isUnlimited
}

func (this *service) shouldRateLimitWorker(
ctx context.Context, request *pb.RateLimitRequest) *pb.RateLimitResponse {

checkServiceErr(request.Domain != "", "rate limit domain must not be empty")
checkServiceErr(len(request.Descriptors) != 0, "rate limit descriptor list must not be empty")

limitsToCheck, isUnlimited := this.constructLimitsToCheck(request, ctx)

responseDescriptorStatuses := this.cache.DoLimit(ctx, request, limitsToCheck)
assert.Assert(len(limitsToCheck) == len(responseDescriptorStatuses))
Expand All @@ -118,9 +135,16 @@ func (this *service) shouldRateLimitWorker(
response.Statuses = make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors))
finalCode := pb.RateLimitResponse_OK
for i, descriptorStatus := range responseDescriptorStatuses {
response.Statuses[i] = descriptorStatus
if descriptorStatus.Code == pb.RateLimitResponse_OVER_LIMIT {
finalCode = descriptorStatus.Code
if isUnlimited[i] {
response.Statuses[i] = &pb.RateLimitResponse_DescriptorStatus{
Code: pb.RateLimitResponse_OK,
LimitRemaining: math.MaxUint32,
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you document this somewhere? It should probably go directly in the proto docs as a comment? Or potentially also somewhere in the README?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not sure what you mean by the proto docs, I added a comment to the README

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I mean doing a PR here also: https://github.com/envoyproxy/envoy/blob/7c1991ffe382bacf2bf415a643c5ea7428d0880e/api/envoy/service/ratelimit/v3/rls.proto#L130-L131

This is the canonical service definition for the ratelimit service.

}
} else {
response.Statuses[i] = descriptorStatus
if descriptorStatus.Code == pb.RateLimitResponse_OVER_LIMIT {
finalCode = descriptorStatus.Code
}
}
}

Expand Down
4 changes: 4 additions & 0 deletions test/config/basic_config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -56,3 +56,7 @@ descriptors:
rate_limit:
unit: day
requests_per_unit: 25

- key: key6
rate_limit:
unlimited: true
22 changes: 22 additions & 0 deletions test/config/config_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -164,6 +164,17 @@ func TestBasicConfig(t *testing.T) {
assert.EqualValues(1, stats.NewCounter("test-domain.key4.over_limit").Value())
assert.EqualValues(1, stats.NewCounter("test-domain.key4.near_limit").Value())
assert.EqualValues(1, stats.NewCounter("test-domain.key4.within_limit").Value())

rl = rlConfig.GetLimit(
nil, "test-domain",
&pb_struct.RateLimitDescriptor{
Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key6", Value: "foo"}},
})
rl.Stats.TotalHits.Inc()
rl.Stats.WithinLimit.Inc()
assert.True(rl.Unlimited)
assert.EqualValues(1, stats.NewCounter("test-domain.key6.total_hits").Value())
assert.EqualValues(1, stats.NewCounter("test-domain.key6.within_limit").Value())
}

func TestConfigLimitOverride(t *testing.T) {
Expand Down Expand Up @@ -364,3 +375,14 @@ func TestNonMapList(t *testing.T) {
},
"non_map_list.yaml: config error, yaml file contains list of type other than map: a")
}

// TestUnlimitedWithRateLimitUnit verifies that loading a configuration which
// marks a descriptor as unlimited while also specifying a rate limit unit
// panics with a descriptive config error.
func TestUnlimitedWithRateLimitUnit(t *testing.T) {
	loadInvalidConfig := func() {
		statsManager := mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))
		config.NewRateLimitConfigImpl(loadFile("unlimited_with_unit.yaml"), statsManager)
	}
	expectConfigPanic(t, loadInvalidConfig, "unlimited_with_unit.yaml: should not specify rate limit unit when unlimited")
}
7 changes: 7 additions & 0 deletions test/config/unlimited_with_unit.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
domain: test-domain
descriptors:
- key: foo
rate_limit:
unlimited: true
unit: day
requests_per_unit: 25
10 changes: 5 additions & 5 deletions test/limiter/base_limiter_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ func TestGenerateCacheKeys(t *testing.T) {
timeSource.EXPECT().UnixNow().Return(int64(1234))
baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm)
request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)}
assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value())
cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1)
assert.Equal(1, len(cacheKeys))
Expand All @@ -46,7 +46,7 @@ func TestGenerateCacheKeysPrefix(t *testing.T) {
timeSource.EXPECT().UnixNow().Return(int64(1234))
baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:", sm)
request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)}
assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value())
cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1)
assert.Equal(1, len(cacheKeys))
Expand Down Expand Up @@ -100,7 +100,7 @@ func TestGetResponseStatusOverLimitWithLocalCache(t *testing.T) {
statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := mockstats.NewMockStatManager(statsStore)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm)
limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)}
limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5)
// As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits.
responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2)
Expand All @@ -121,7 +121,7 @@ func TestGetResponseStatusOverLimit(t *testing.T) {
localCache := freecache.NewCache(100)
sm := mockstats.NewMockStatManager(statsStore)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm)
limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)}
limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5)
responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1)
assert.Equal(pb.RateLimitResponse_OVER_LIMIT, responseStatus.GetCode())
Expand All @@ -143,7 +143,7 @@ func TestGetResponseStatusBelowLimit(t *testing.T) {
statsStore := stats.NewStore(stats.NewNullSink(), false)
sm := mockstats.NewMockStatManager(statsStore)
baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm)
limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)}
limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10)
responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1)
assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode())
Expand Down
Loading