diff --git a/OWNERS.md b/OWNERS.md deleted file mode 100644 index ac3a5660..00000000 --- a/OWNERS.md +++ /dev/null @@ -1,2 +0,0 @@ -* Matt Klein ([mattklein123](https://github.com/mattklein123)) (mklein@lyft.com) -* Yuki Sawa ([ysawa0](https://github.com/ysawa0)) (yukisawa@gmail.com) diff --git a/README.md b/README.md index adba6164..9261f71c 100644 --- a/README.md +++ b/README.md @@ -4,8 +4,8 @@ - [Overview](#overview) - [Docker Image](#docker-image) -- [Supported Envoy APIs](#supported-envoy-apis) - - [API Deprecation History](#api-deprecation-history) +- [Deprecation of Legacy Ratelimit Proto and v2 Ratelimit proto](#deprecation-of-legacy-ratelimit-proto-and-v2-ratelimit-proto) + - [Deprecation Schedule](#deprecation-schedule) - [Building and Testing](#building-and-testing) - [Docker-compose setup](#docker-compose-setup) - [Full test environment](#full-test-environment) @@ -52,12 +52,16 @@ decision is then returned to the caller. # Docker Image For every main commit, an image is pushed to [Dockerhub](https://hub.docker.com/r/envoyproxy/ratelimit/tags?page=1&ordering=last_updated). There is currently no versioning (post v1.4.0) and tags are based on commit sha. -# Supported Envoy APIs +# Deprecation of Legacy Ratelimit Proto and v2 Ratelimit proto -[v3 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) is currently supported. -Support for [v2 rls proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto) is now deprecated. +Envoy's data-plane-api defines a ratelimit service proto v3 [rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto). +Logically the data-plane-api rls [v3](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) +is equivalent to the rls [v2](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto). 
+However, due to the namespace differences and how gRPC routing works it is not possible to transparently route the legacy v2 ratelimit requests to the v3 definitions. +Therefore, the ratelimit service will upgrade the requests, process them internally as it would process a v3 ratelimit request, and then downgrade the response to send back to the client. This means that, +at the cost of a slight performance hit for clients using the legacy proto, ratelimit is backwards compatible with the legacy proto. -## API Deprecation History +## Deprecation Schedule 1. `v1.0.0` tagged on commit `0ded92a2af8261d43096eba4132e45b99a3b8b14`. Ratelimit has been in production use at Lyft for over 2 years. 2. `v1.1.0` introduces the data-plane-api proto and initiates the deprecation of the legacy [ratelimit.proto](https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). @@ -65,7 +69,7 @@ Support for [v2 rls proto](https://github.com/envoyproxy/data-plane-api/blob/mas The current version of ratelimit protocol is changed to [v3 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) while [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) is still supported as a legacy protocol. -4. `4bb32826` deleted support for legacy [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) +4. 
`TODO` deletes support for legacy [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) # Building and Testing diff --git a/go.sum b/go.sum index ff594f00..071a59b3 100644 --- a/go.sum +++ b/go.sum @@ -36,12 +36,14 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.4.1 h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= @@ -74,9 +76,11 @@ github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 
v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= @@ -97,6 +101,7 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -112,8 +117,11 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools 
v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262 h1:qsl9y/CJx34tuA7QCPNp86JNJe4spst6Ff8MjvPUdPg= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -123,8 +131,10 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= diff --git 
a/src/service/ratelimit.go b/src/service/ratelimit.go index b8d1c0bd..8ac20019 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -20,6 +20,7 @@ import ( type RateLimitServiceServer interface { pb.RateLimitServiceServer GetCurrentConfig() config.RateLimitConfig + GetLegacyService() RateLimitLegacyServiceServer } type service struct { @@ -30,6 +31,7 @@ type service struct { runtimeUpdateEvent chan int cache limiter.RateLimitCache stats stats.ServiceStats + legacy *legacyService runtimeWatchRoot bool } @@ -183,6 +185,10 @@ func (this *service) ShouldRateLimit( return response, nil } +func (this *service) GetLegacyService() RateLimitLegacyServiceServer { + return this.legacy +} + func (this *service) GetCurrentConfig() config.RateLimitConfig { this.configLock.RLock() defer this.configLock.RUnlock() @@ -202,6 +208,10 @@ func NewService(runtime loader.IFace, cache limiter.RateLimitCache, stats: statsManager.NewServiceStats(), runtimeWatchRoot: runtimeWatchRoot, } + newService.legacy = &legacyService{ + s: newService, + shouldRateLimitLegacyStats: statsManager.NewShouldRateLimitLegacyStats(), + } runtime.AddUpdateCallback(newService.runtimeUpdateEvent) diff --git a/src/service/ratelimit_legacy.go b/src/service/ratelimit_legacy.go new file mode 100644 index 00000000..ac3971e0 --- /dev/null +++ b/src/service/ratelimit_legacy.go @@ -0,0 +1,135 @@ +package ratelimit + +import ( + core_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/stats" + "golang.org/x/net/context" +) + +type RateLimitLegacyServiceServer interface { + pb_legacy.RateLimitServiceServer +} + +// legacyService is used to implement v2 rls.proto 
(https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto) +// the legacyService receives RateLimitRequests, converts the request, and calls the service's ShouldRateLimit method. +type legacyService struct { + s *service + shouldRateLimitLegacyStats stats.ShouldRateLimitLegacyStats +} + +func (this *legacyService) ShouldRateLimit( + ctx context.Context, + legacyRequest *pb_legacy.RateLimitRequest) (finalResponse *pb_legacy.RateLimitResponse, finalError error) { + + request, err := ConvertLegacyRequest(legacyRequest) + if err != nil { + this.shouldRateLimitLegacyStats.ReqConversionError.Inc() + return nil, err + } + resp, err := this.s.ShouldRateLimit(ctx, request) + if err != nil { + this.shouldRateLimitLegacyStats.ShouldRateLimitError.Inc() + return nil, err + } + + legacyResponse, err := ConvertResponse(resp) + if err != nil { + this.shouldRateLimitLegacyStats.RespConversionError.Inc() + return nil, err + } + + return legacyResponse, nil +} + +func ConvertLegacyRequest(legacyRequest *pb_legacy.RateLimitRequest) (*pb.RateLimitRequest, error) { + if legacyRequest == nil { + return nil, nil + } + request := &pb.RateLimitRequest{ + Domain: legacyRequest.GetDomain(), + HitsAddend: legacyRequest.GetHitsAddend(), + } + if legacyRequest.GetDescriptors() != nil { + descriptors := make([]*pb_struct.RateLimitDescriptor, len(legacyRequest.GetDescriptors())) + for i, descriptor := range legacyRequest.GetDescriptors() { + if descriptor != nil { + descriptors[i] = &pb_struct.RateLimitDescriptor{} + if descriptor.GetEntries() != nil { + entries := make([]*pb_struct.RateLimitDescriptor_Entry, len(descriptor.GetEntries())) + for j, entry := range descriptor.GetEntries() { + if entry != nil { + entries[j] = &pb_struct.RateLimitDescriptor_Entry{ + Key: entry.GetKey(), + Value: entry.GetValue(), + } + } + } + descriptors[i].Entries = entries + } + } + } + request.Descriptors = descriptors + } + return request, nil +} + +func 
ConvertResponse(response *pb.RateLimitResponse) (*pb_legacy.RateLimitResponse, error) { + if response == nil { + return nil, nil + } + + legacyResponse := &pb_legacy.RateLimitResponse{ + OverallCode: pb_legacy.RateLimitResponse_Code(response.GetOverallCode()), + } + + if response.GetStatuses() != nil { + statuses := make([]*pb_legacy.RateLimitResponse_DescriptorStatus, len(response.GetStatuses())) + for i, status := range response.GetStatuses() { + if status != nil { + statuses[i] = &pb_legacy.RateLimitResponse_DescriptorStatus{ + Code: pb_legacy.RateLimitResponse_Code(status.GetCode()), + LimitRemaining: status.GetLimitRemaining(), + } + if status.GetCurrentLimit() != nil { + statuses[i].CurrentLimit = &pb_legacy.RateLimitResponse_RateLimit{ + Name: status.GetCurrentLimit().GetName(), + RequestsPerUnit: status.GetCurrentLimit().GetRequestsPerUnit(), + Unit: pb_legacy.RateLimitResponse_RateLimit_Unit(status.GetCurrentLimit().GetUnit()), + } + } + } + } + legacyResponse.Statuses = statuses + } + + if response.GetRequestHeadersToAdd() != nil { + requestHeadersToAdd := make([]*core_legacy.HeaderValue, len(response.GetRequestHeadersToAdd())) + for i, header := range response.GetRequestHeadersToAdd() { + if header != nil { + requestHeadersToAdd[i] = &core_legacy.HeaderValue{ + Key: header.GetKey(), + Value: header.GetValue(), + } + } + } + legacyResponse.RequestHeadersToAdd = requestHeadersToAdd + } + + if response.GetResponseHeadersToAdd() != nil { + responseHeadersToAdd := make([]*core_legacy.HeaderValue, len(response.GetResponseHeadersToAdd())) + for i, header := range response.GetResponseHeadersToAdd() { + if header != nil { + responseHeadersToAdd[i] = &core_legacy.HeaderValue{ + Key: header.GetKey(), + Value: header.GetValue(), + } + } + } + legacyResponse.Headers = responseHeadersToAdd + } + + return legacyResponse, nil +} diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 31865045..c8fb45e3 100644 --- 
a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -14,6 +14,7 @@ import ( "github.com/coocood/freecache" + pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" @@ -118,10 +119,12 @@ func (runner *Runner) Run() { srv.AddJsonHandler(service) - // Ratelimit is compatible with the below proto definition - // data-plane-api v3 rls.proto: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto - // v2 proto is no longer supported + // Ratelimit is compatible with two proto definitions + // 1. data-plane-api v3 rls.proto: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto pb.RegisterRateLimitServiceServer(srv.GrpcServer(), service) + // 2. data-plane-api v2 rls.proto: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto + pb_legacy.RegisterRateLimitServiceServer(srv.GrpcServer(), service.GetLegacyService()) + // (1) is the current definition, and (2) is the legacy definition. srv.Start() } diff --git a/src/stats/manager.go b/src/stats/manager.go index 6c54e474..a96753b7 100644 --- a/src/stats/manager.go +++ b/src/stats/manager.go @@ -14,6 +14,9 @@ type Manager interface { // Initializes a ServiceStats structure. // Multiple calls to this method are idempotent. NewServiceStats() ServiceStats + // Initializes a ShouldRateLimitLegacyStats structure. + // Multiple calls to this method are idempotent. + NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats // Returns the stats.Store wrapped by the Manager. 
GetStatsStore() stats.Store } @@ -21,6 +24,7 @@ type Manager interface { type ManagerImpl struct { store gostats.Store rlStatsScope gostats.Scope + legacyStatsScope gostats.Scope serviceStatsScope gostats.Scope shouldRateLimitScope gostats.Scope } @@ -40,6 +44,13 @@ type ServiceStats struct { ShouldRateLimit ShouldRateLimitStats } +// Legacy Stats for ratelimit errors. +type ShouldRateLimitLegacyStats struct { + ReqConversionError gostats.Counter + RespConversionError gostats.Counter + ShouldRateLimitError gostats.Counter +} + // Stats for an individual rate limit config entry. type RateLimitStats struct { Key string diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index e7b6a0b1..48a01b1a 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -11,6 +11,7 @@ func NewStatManager(store gostats.Store, settings settings.Settings) *ManagerImp return &ManagerImpl{ store: store, rlStatsScope: serviceScope.Scope("rate_limit"), + legacyStatsScope: serviceScope.Scope("call.should_rate_limit_legacy"), serviceStatsScope: serviceScope, shouldRateLimitScope: serviceScope.Scope("call.should_rate_limit"), } @@ -35,6 +36,14 @@ func (this *ManagerImpl) NewStats(key string) RateLimitStats { return ret } +func (this *ManagerImpl) NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats { + return ShouldRateLimitLegacyStats{ + ReqConversionError: this.legacyStatsScope.NewCounter("req_conversion_error"), + RespConversionError: this.legacyStatsScope.NewCounter("resp_conversion_error"), + ShouldRateLimitError: this.legacyStatsScope.NewCounter("should_rate_limit_error"), + } +} + func (this *ManagerImpl) NewShouldRateLimitStats() ShouldRateLimitStats { ret := ShouldRateLimitStats{} ret.RedisError = this.shouldRateLimitScope.NewCounter("redis_error") diff --git a/test/common/common.go b/test/common/common.go index 54216f1f..b90c536e 100644 --- a/test/common/common.go +++ b/test/common/common.go @@ -15,7 +15,9 @@ import ( 
"github.com/golang/protobuf/proto" "github.com/stretchr/testify/assert" + pb_struct_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" ) @@ -64,6 +66,22 @@ func NewRateLimitRequest(domain string, descriptors [][][2]string, hitsAddend ui return request } +func NewRateLimitRequestLegacy(domain string, descriptors [][][2]string, hitsAddend uint32) *pb_legacy.RateLimitRequest { + request := &pb_legacy.RateLimitRequest{} + request.Domain = domain + for _, descriptor := range descriptors { + newDescriptor := &pb_struct_legacy.RateLimitDescriptor{} + for _, entry := range descriptor { + newDescriptor.Entries = append( + newDescriptor.Entries, + &pb_struct_legacy.RateLimitDescriptor_Entry{Key: entry[0], Value: entry[1]}) + } + request.Descriptors = append(request.Descriptors, newDescriptor) + } + request.HitsAddend = hitsAddend + return request +} + func AssertProtoEqual(assert *assert.Assertions, expected proto.Message, actual proto.Message) { assert.True(proto.Equal(expected, actual), fmt.Sprintf("These two protobuf messages are not equal:\nexpected: %v\nactual: %v", expected, actual)) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 2699b405..83fb0ba6 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/memcached" "github.com/envoyproxy/ratelimit/src/service_cmd/runner" @@ -63,6 +64,17 @@ func newDescriptorStatus(status pb.RateLimitResponse_Code, requestsPerUnit uint3 } } +func newDescriptorStatusLegacy( 
+ status pb_legacy.RateLimitResponse_Code, requestsPerUnit uint32, + unit pb_legacy.RateLimitResponse_RateLimit_Unit, limitRemaining uint32) *pb_legacy.RateLimitResponse_DescriptorStatus { + + return &pb_legacy.RateLimitResponse_DescriptorStatus{ + Code: status, + CurrentLimit: &pb_legacy.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}, + LimitRemaining: limitRemaining, + } +} + func makeSimpleRedisSettings(redisPort int, perSecondPort int, perSecond bool, localCacheSize int) settings.Settings { s := defaultSettings() @@ -589,6 +601,107 @@ func testBasicBaseConfig(s settings.Settings) func(*testing.T) { } } +func TestBasicConfigLegacy(t *testing.T) { + common.WithMultiRedis(t, []common.RedisConfig{ + {Port: 6383}, + }, func() { + testBasicConfigLegacy(t) + }) +} + +func testBasicConfigLegacy(t *testing.T) { + s := makeSimpleRedisSettings(6383, 6380, false, 0) + + runner := startTestRunner(t, s) + defer runner.Stop() + + assert := assert.New(t) + conn, err := grpc.Dial("localhost:8083", grpc.WithInsecure()) + + assert.NoError(err) + defer conn.Close() + c := pb_legacy.NewRateLimitServiceClient(conn) + + response, err := c.ShouldRateLimit( + context.Background(), + common.NewRateLimitRequestLegacy("foo", [][][2]string{{{"hello", "world"}}}, 1)) + common.AssertProtoEqual( + assert, + &pb_legacy.RateLimitResponse{ + OverallCode: pb_legacy.RateLimitResponse_OK, + Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{{Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}}, + response) + assert.NoError(err) + + response, err = c.ShouldRateLimit( + context.Background(), + common.NewRateLimitRequestLegacy("basic_legacy", [][][2]string{{{"key1", "foo"}}}, 1)) + common.AssertProtoEqual( + assert, + &pb_legacy.RateLimitResponse{ + OverallCode: pb_legacy.RateLimitResponse_OK, + Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ + newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 50, 
pb_legacy.RateLimitResponse_RateLimit_SECOND, 49)}}, + response) + assert.NoError(err) + + // Now come up with a random key, and go over limit for a minute limit which should always work. + r := rand.New(rand.NewSource(time.Now().UnixNano())) + randomInt := r.Int() + for i := 0; i < 25; i++ { + response, err = c.ShouldRateLimit( + context.Background(), + common.NewRateLimitRequestLegacy( + "another", [][][2]string{{{"key2", strconv.Itoa(randomInt)}}}, 1)) + + status := pb_legacy.RateLimitResponse_OK + limitRemaining := uint32(20 - (i + 1)) + if i >= 20 { + status = pb_legacy.RateLimitResponse_OVER_LIMIT + limitRemaining = 0 + } + + common.AssertProtoEqual( + assert, + &pb_legacy.RateLimitResponse{ + OverallCode: status, + Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ + newDescriptorStatusLegacy(status, 20, pb_legacy.RateLimitResponse_RateLimit_MINUTE, limitRemaining)}}, + response) + assert.NoError(err) + } + + // Limit now against 2 keys in the same domain. + randomInt = r.Int() + for i := 0; i < 15; i++ { + response, err = c.ShouldRateLimit( + context.Background(), + common.NewRateLimitRequestLegacy( + "another_legacy", + [][][2]string{ + {{"key2", strconv.Itoa(randomInt)}}, + {{"key3", strconv.Itoa(randomInt)}}}, 1)) + + status := pb_legacy.RateLimitResponse_OK + limitRemaining1 := uint32(20 - (i + 1)) + limitRemaining2 := uint32(10 - (i + 1)) + if i >= 10 { + status = pb_legacy.RateLimitResponse_OVER_LIMIT + limitRemaining2 = 0 + } + + common.AssertProtoEqual( + assert, + &pb_legacy.RateLimitResponse{ + OverallCode: status, + Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ + newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 20, pb_legacy.RateLimitResponse_RateLimit_MINUTE, limitRemaining1), + newDescriptorStatusLegacy(status, 10, pb_legacy.RateLimitResponse_RateLimit_HOUR, limitRemaining2)}}, + response) + assert.NoError(err) + } +} + func startTestRunner(t *testing.T, s settings.Settings) *runner.Runner { t.Helper() runner := 
runner.NewRunner(s) diff --git a/test/integration/runtime/current/ratelimit/config/another_legacy.yaml b/test/integration/runtime/current/ratelimit/config/another_legacy.yaml new file mode 100644 index 00000000..377f90bd --- /dev/null +++ b/test/integration/runtime/current/ratelimit/config/another_legacy.yaml @@ -0,0 +1,11 @@ +domain: another_legacy +descriptors: + - key: key2 + rate_limit: + unit: minute + requests_per_unit: 20 + + - key: key3 + rate_limit: + unit: hour + requests_per_unit: 10 \ No newline at end of file diff --git a/test/integration/runtime/current/ratelimit/config/basic_legacy.yaml b/test/integration/runtime/current/ratelimit/config/basic_legacy.yaml new file mode 100644 index 00000000..3135da55 --- /dev/null +++ b/test/integration/runtime/current/ratelimit/config/basic_legacy.yaml @@ -0,0 +1,6 @@ +domain: basic_legacy +descriptors: + - key: key1 + rate_limit: + unit: second + requests_per_unit: 50 diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go index cbf5bbf1..52282e72 100644 --- a/test/mocks/stats/manager.go +++ b/test/mocks/stats/manager.go @@ -30,6 +30,15 @@ func (m *MockStatManager) NewServiceStats() stats.ServiceStats { return ret } +func (m *MockStatManager) NewShouldRateLimitLegacyStats() stats.ShouldRateLimitLegacyStats { + s := m.store.Scope("call.should_rate_limit_legacy") + return stats.ShouldRateLimitLegacyStats{ + ReqConversionError: s.NewCounter("req_conversion_error"), + RespConversionError: s.NewCounter("resp_conversion_error"), + ShouldRateLimitError: s.NewCounter("should_rate_limit_error"), + } +} + func (m *MockStatManager) NewStats(key string) stats.RateLimitStats { ret := stats.RateLimitStats{} logger.Debugf("outputing test gostats %s", key) diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go new file mode 100644 index 00000000..77fb31f7 --- /dev/null +++ b/test/service/ratelimit_legacy_test.go @@ -0,0 +1,429 @@ +package ratelimit_test + +import ( + 
"github.com/envoyproxy/ratelimit/src/stats" + "testing" + + core_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + pb_struct_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" + core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/redis" + "github.com/envoyproxy/ratelimit/src/service" + "github.com/envoyproxy/ratelimit/test/common" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func convertRatelimit(ratelimit *pb.RateLimitResponse_RateLimit) (*pb_legacy.RateLimitResponse_RateLimit, error) { + if ratelimit == nil { + return nil, nil + } + + return &pb_legacy.RateLimitResponse_RateLimit{ + Name: ratelimit.GetName(), + RequestsPerUnit: ratelimit.GetRequestsPerUnit(), + Unit: pb_legacy.RateLimitResponse_RateLimit_Unit(ratelimit.GetUnit()), + }, nil +} + +func convertRatelimits(ratelimits []*config.RateLimit) ([]*pb_legacy.RateLimitResponse_RateLimit, error) { + if ratelimits == nil { + return nil, nil + } + + ret := make([]*pb_legacy.RateLimitResponse_RateLimit, 0) + for _, rl := range ratelimits { + if rl == nil { + ret = append(ret, nil) + continue + } + legacyRl, err := convertRatelimit(rl.Limit) + if err != nil { + return nil, err + } + ret = append(ret, legacyRl) + } + + return ret, nil +} + +func TestServiceLegacy(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + service := t.setupBasicService() + + // First request, config should be loaded. 
+ legacyRequest := common.NewRateLimitRequestLegacy("test-domain", [][][2]string{{{"hello", "world"}}}, 1) + req, err := ratelimit.ConvertLegacyRequest(legacyRequest) + if err != nil { + t.assert.FailNow(err.Error()) + } + t.config.EXPECT().GetLimit(nil, "test-domain", req.Descriptors[0]).Return(nil) + t.cache.EXPECT().DoLimit(nil, req, []*config.RateLimit{nil}).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) + + response, err := service.GetLegacyService().ShouldRateLimit(nil, legacyRequest) + common.AssertProtoEqual( + t.assert, + &pb_legacy.RateLimitResponse{ + OverallCode: pb_legacy.RateLimitResponse_OK, + Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{{Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}}, + response) + t.assert.Nil(err) + + // Force a config reload. + barrier := newBarrier() + t.configLoader.EXPECT().Load( + []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( + func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config) + t.runtimeUpdateCallback <- 1 + barrier.wait() + + // Different request. 
+ legacyRequest = common.NewRateLimitRequestLegacy( + "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) + req, err = ratelimit.ConvertLegacyRequest(legacyRequest) + if err != nil { + t.assert.FailNow(err.Error()) + } + + limits := []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false), + nil} + legacyLimits, err := convertRatelimits(limits) + if err != nil { + t.assert.FailNow(err.Error()) + } + + t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(nil, req, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) + response, err = service.GetLegacyService().ShouldRateLimit(nil, legacyRequest) + common.AssertProtoEqual( + t.assert, + &pb_legacy.RateLimitResponse{ + OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT, + Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ + {Code: pb_legacy.RateLimitResponse_OVER_LIMIT, CurrentLimit: legacyLimits[0], LimitRemaining: 0}, + {Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + }}, + response) + t.assert.Nil(err) + + // Config load failure. + t.configLoader.EXPECT().Load( + []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( + func([]config.RateLimitConfigToLoad, stats.Manager) { + defer barrier.signal() + panic(config.RateLimitConfigError("load error")) + }) + t.runtimeUpdateCallback <- 1 + barrier.wait() + + // Config should still be valid. Also make sure order does not affect results. 
+ limits = []*config.RateLimit{ + nil, + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false)} + legacyLimits, err = convertRatelimits(limits) + if err != nil { + t.assert.FailNow(err.Error()) + } + + t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(nil, req, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}}) + response, err = service.GetLegacyService().ShouldRateLimit(nil, legacyRequest) + common.AssertProtoEqual( + t.assert, + &pb_legacy.RateLimitResponse{ + OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT, + Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ + {Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + {Code: pb_legacy.RateLimitResponse_OVER_LIMIT, CurrentLimit: legacyLimits[1], LimitRemaining: 0}, + }}, + response) + t.assert.Nil(err) + + t.assert.EqualValues(2, t.statStore.NewCounter("config_load_success").Value()) + t.assert.EqualValues(1, t.statStore.NewCounter("config_load_error").Value()) +} + +func TestEmptyDomainLegacy(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + service := t.setupBasicService() + + request := common.NewRateLimitRequestLegacy("", [][][2]string{{{"hello", "world"}}}, 1) + response, err := service.GetLegacyService().ShouldRateLimit(nil, request) + t.assert.Nil(response) + t.assert.Equal("rate limit domain must not be empty", err.Error()) + t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) + t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) +} + +func 
TestEmptyDescriptorsLegacy(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + service := t.setupBasicService() + + request := common.NewRateLimitRequestLegacy("test-domain", [][][2]string{}, 1) + response, err := service.GetLegacyService().ShouldRateLimit(nil, request) + t.assert.Nil(response) + t.assert.Equal("rate limit descriptor list must not be empty", err.Error()) + t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) + t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) +} + +func TestCacheErrorLegacy(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + service := t.setupBasicService() + + legacyRequest := common.NewRateLimitRequestLegacy("different-domain", [][][2]string{{{"foo", "bar"}}}, 1) + req, err := ratelimit.ConvertLegacyRequest(legacyRequest) + if err != nil { + t.assert.FailNow(err.Error()) + } + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false)} + t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[0]).Return(limits[0]) + t.cache.EXPECT().DoLimit(nil, req, limits).Do( + func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { + panic(redis.RedisError("cache error")) + }) + + response, err := service.GetLegacyService().ShouldRateLimit(nil, legacyRequest) + t.assert.Nil(response) + t.assert.Equal("cache error", err.Error()) + t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.redis_error").Value()) + t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) +} + +func TestInitialLoadErrorLegacy(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + + t.runtime.EXPECT().AddUpdateCallback(gomock.Any()).Do( + func(callback chan<- int) { t.runtimeUpdateCallback = callback }) + 
t.runtime.EXPECT().Snapshot().Return(t.snapshot).MinTimes(1) + t.snapshot.EXPECT().Keys().Return([]string{"foo", "config.basic_config"}).MinTimes(1) + t.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1) + t.configLoader.EXPECT().Load( + []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( + func([]config.RateLimitConfigToLoad, stats.Manager) { + panic(config.RateLimitConfigError("load error")) + }) + service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statsManager, true) + + request := common.NewRateLimitRequestLegacy("test-domain", [][][2]string{{{"hello", "world"}}}, 1) + response, err := service.GetLegacyService().ShouldRateLimit(nil, request) + t.assert.Nil(response) + t.assert.Equal("no rate limit configuration loaded", err.Error()) + t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) + t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) + +} + +func TestConvertLegacyRequest(test *testing.T) { + req, err := ratelimit.ConvertLegacyRequest(nil) + if err != nil { + assert.FailNow(test, err.Error()) + } + assert.Nil(test, req) + + { + request := &pb_legacy.RateLimitRequest{ + Domain: "test", + Descriptors: nil, + HitsAddend: 10, + } + + expectedRequest := &pb.RateLimitRequest{ + Domain: "test", + Descriptors: nil, + HitsAddend: 10, + } + + req, err := ratelimit.ConvertLegacyRequest(request) + if err != nil { + assert.FailNow(test, err.Error()) + } + + common.AssertProtoEqual(assert.New(test), expectedRequest, req) + } + + { + request := &pb_legacy.RateLimitRequest{ + Domain: "test", + Descriptors: []*pb_struct_legacy.RateLimitDescriptor{}, + HitsAddend: 10, + } + + expectedRequest := &pb.RateLimitRequest{ + Domain: "test", + Descriptors: []*pb_struct.RateLimitDescriptor{}, + HitsAddend: 10, + } + + req, err := ratelimit.ConvertLegacyRequest(request) + if err != nil { + 
assert.FailNow(test, err.Error()) + } + + common.AssertProtoEqual(assert.New(test), expectedRequest, req) + } + + { + descriptors := []*pb_struct_legacy.RateLimitDescriptor{ + { + Entries: []*pb_struct_legacy.RateLimitDescriptor_Entry{ + { + Key: "foo", + Value: "foo_value", + }, + nil, + }, + }, + { + Entries: []*pb_struct_legacy.RateLimitDescriptor_Entry{}, + }, + { + Entries: nil, + }, + nil, + } + + request := &pb_legacy.RateLimitRequest{ + Domain: "test", + Descriptors: descriptors, + HitsAddend: 10, + } + + expectedDescriptors := []*pb_struct.RateLimitDescriptor{ + { + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "foo", + Value: "foo_value", + }, + nil, + }, + }, + { + Entries: []*pb_struct.RateLimitDescriptor_Entry{}, + }, + { + Entries: nil, + }, + nil, + } + + expectedRequest := &pb.RateLimitRequest{ + Domain: "test", + Descriptors: expectedDescriptors, + HitsAddend: 10, + } + + req, err := ratelimit.ConvertLegacyRequest(request) + if err != nil { + assert.FailNow(test, err.Error()) + } + + common.AssertProtoEqual(assert.New(test), expectedRequest, req) + } +} + +func TestConvertResponse(test *testing.T) { + resp, err := ratelimit.ConvertResponse(nil) + if err != nil { + assert.FailNow(test, err.Error()) + } + assert.Nil(test, resp) + + rl := &pb.RateLimitResponse_RateLimit{ + RequestsPerUnit: 10, + Unit: pb.RateLimitResponse_RateLimit_DAY, + } + + statuses := []*pb.RateLimitResponse_DescriptorStatus{ + { + Code: pb.RateLimitResponse_OK, + CurrentLimit: nil, + LimitRemaining: 9, + }, + nil, + { + Code: pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: rl, + LimitRemaining: 0, + }, + } + + requestHeadersToAdd := []*core.HeaderValue{{ + Key: "test_request", + Value: "test_request_value", + }, nil} + + responseHeadersToAdd := []*core.HeaderValue{{ + Key: "test_response", + Value: "test_response", + }, nil} + + response := &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OVER_LIMIT, + Statuses: statuses, + RequestHeadersToAdd: 
requestHeadersToAdd, + ResponseHeadersToAdd: responseHeadersToAdd, + } + + expectedRl := &pb_legacy.RateLimitResponse_RateLimit{ + RequestsPerUnit: 10, + Unit: pb_legacy.RateLimitResponse_RateLimit_DAY, + } + + expectedStatuses := []*pb_legacy.RateLimitResponse_DescriptorStatus{ + { + Code: pb_legacy.RateLimitResponse_OK, + CurrentLimit: nil, + LimitRemaining: 9, + }, + nil, + { + Code: pb_legacy.RateLimitResponse_OVER_LIMIT, + CurrentLimit: expectedRl, + LimitRemaining: 0, + }, + } + + expectedRequestHeadersToAdd := []*core_legacy.HeaderValue{{ + Key: "test_request", + Value: "test_request_value", + }, nil} + + expectedResponseHeadersToAdd := []*core_legacy.HeaderValue{{ + Key: "test_response", + Value: "test_response", + }, nil} + + expectedResponse := &pb_legacy.RateLimitResponse{ + OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT, + Statuses: expectedStatuses, + RequestHeadersToAdd: expectedRequestHeadersToAdd, + Headers: expectedResponseHeadersToAdd, + } + + resp, err = ratelimit.ConvertResponse(response) + if err != nil { + assert.FailNow(test, err.Error()) + } + + common.AssertProtoEqual(assert.New(test), expectedResponse, resp) +}